repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
perm_hmm | perm_hmm-master/perm_hmm/policies/ignore_transitions.py | """For the special case of two states and two outcomes, computes the optimal
permutations for the related HMM that has transition matrix equal to the
identity matrix.
Because there are only two states, we adopt the convention that the two states
are called the ``dark`` and ``bright`` states. The ``dark`` state is the one
such that the outcome is more likely to be ``0``, and the ``bright`` state is
the one such that the outcome is more likely to be ``1``.
This module uses the :py:mod:`~adapt_hypo_test` package to compute the optimal
permutations.
"""
import torch
from perm_hmm.policies.policy import PermPolicy
from adapt_hypo_test.two_states import no_transitions as nt
class IgnoreTransitions(PermPolicy):
    r"""Ignoring the transition matrix, computes the optimal permutations for
    the HMM for all possible outcomes.

    This method of computing permutations has complexity O(t**2), where t is the
    number of steps.

    In addition to the attributes of the base class, instances of this class
    have the following attributes:

    ``p``:
        A float, the probability of the dark state giving outcome 1.

    ``q``:
        A float, the probability of the bright state giving outcome 0.

    ``dtb``:
        The permutation that takes the dark state to the bright state.

    ``id``:
        The identity permutation.

    ``x``:
        A representation of the log odds of the belief state that we compute
        the permutations at. See the :py:mod:`~adapt_hypo_test` module for more
        details.

    ``sigmas``:
        A list indicating whether to apply to nontrivial permutation when
        reaching a particular log odds.
    """

    def __init__(self, possible_perms, p, q, dark_state, bright_state, save_history=False):
        r"""Initialization.

        This class computes the optimal permutations for the case that the
        transition matrix is trivial, and that there is one bright state and
        one dark state. The "true" model may have more states, and a nontrivial
        transition matrix. To make the identification between the two models,
        we need to know which state is to be interpreted as the dark state
        and which as the bright state. The possible perms of the true model are
        needed to identify which corresponds to the dark-bright swap.

        :param possible_perms: Possible permutations of the true model.
        :param p: Probability of the dark state giving outcome 1.
        :param q: Probability of the bright state giving outcome 0.
        :param dark_state: Which state of the true model corresponds to the
            dark state.
        :param bright_state: Similar for bright state.
        :param save_history: Whether to save the computation history; passed
            through to the base class.
        :raises ValueError: If the identity or the swap permutations are not
            included as possible permutations.
        """
        super().__init__(possible_perms, save_history=save_history)
        self.p = p
        self.q = q
        num_states = possible_perms.shape[-1]
        # Pick the first permutation whose entry at dark_state is bright_state.
        # NOTE(review): this only constrains where the dark state is sent, not
        # that the permutation is a pure dark<->bright swap — confirm callers
        # pass permutations where this is the intended swap.
        dtb = torch.nonzero(possible_perms[:, dark_state] == bright_state, as_tuple=False)
        if len(dtb) == 0:
            raise ValueError("Need to be able to take dark to bright")
        self.dtb = possible_perms[dtb[0].item()]
        # Locate the identity permutation (row equal to [0, ..., num_states-1]).
        identity = torch.nonzero(torch.all(possible_perms == torch.arange(num_states), dim=-1), as_tuple=False)
        if len(identity) == 0:
            raise ValueError("The identity must be an allowed permutation")
        self.id = possible_perms[identity[0].item()]
        self.x = None
        self.sigmas = None
        self.step = 0

    def reset(self, save_history=False, reset_sigmas=False):
        # Resets log-odds tracker and step counter; sigmas (the solved policy)
        # survive a reset unless reset_sigmas is passed, so the same solution
        # can be reused across batches.
        super().reset(save_history=save_history)
        self.x = None
        self.step = 0
        if reset_sigmas:
            self.sigmas = None

    def solve(self, n):
        r"""Needs to be called before ``calculate_perm``.

        Solves for the ideal permutations in the model where we ignore
        transitions. Calls
        :py:func:`~adapt_hypo_test.two_states.no_transitions.solve` to do so.

        :param n: The number of steps to compute for.
        :return: The expanded value function :math:`\chi`. See
            :py:mod:`~adapt_hypo_test` for more details.
        """
        self.sigmas, chi = nt.solve(self.p, self.q, n)
        return chi

    def calculate_perm(self, data: torch.Tensor) -> (torch.Tensor, dict):
        # Returns the permutation for each run in the batch given the latest
        # observations, tracking the log-odds representation ``x`` across calls.
        if self.sigmas is None:
            raise RuntimeError("Call .solve first with a total number of steps.")
        if self.x is None:
            # First call: one (dark, bright) counter pair per run.
            self.x = torch.zeros(data.shape + (2,), dtype=int)
        # Update the log-odds representation: outcome 0 decrements the first
        # component, outcome 1 increments the second.
        self.x[~data.int().bool(), 0] -= 1
        self.x[data.int().bool(), 1] += 1
        self.step += 1
        if self.step == len(self.sigmas):
            # Final step: no more decisions to make, return the identity.
            return self.id.expand(data.shape + self.id.shape).clone().detach(), {"x": self.x.clone().detach()}
        else:
            # evaluate_sigma works on numpy arrays; p marks the runs for which
            # the nontrivial (dark-to-bright) permutation should be applied.
            self.x, p = nt.evaluate_sigma(self.sigmas[self.step], self.x.numpy())
            self.x = torch.from_numpy(self.x)
            perm = self.id.expand(data.shape + self.id.shape).clone().detach()
            perm[p] = self.dtb
            return perm, {"x": self.x.clone().detach()}
| 5,005 | 40.371901 | 111 | py |
perm_hmm | perm_hmm-master/perm_hmm/policies/min_tree.py | """Make a belief tree labelled with costs, then select the paths giving lowest
costs.
This module contains the
:py:class:`~perm_hmm.policies.min_tree.MinTreePolicy` class, which is a
:py:class:`~perm_hmm.policies.policy.PermPolicy` that selects the
permutations that minimize the cost, computed using a belief tree.
"""
import warnings
from copy import deepcopy
import numpy as np
import torch
from perm_hmm.policies.belief_tree import HMMBeliefTree
from perm_hmm.util import all_strings, perm_idxs_from_perms
from perm_hmm.models.hmms import random_phmm
from perm_hmm.policies.policy import PermPolicy
from perm_hmm.policies.belief import HMMBeliefState
import perm_hmm.log_cost as cf
class MinTreePolicy(PermPolicy):
    r"""Select the permutations that minimize the cost.

    With a limited look-ahead depth, we can compute the cost of all possible
    belief states, and then select the permutations that minimize the cost.

    In addition to the attributes of the base class, instances of this class
    have the following attributes:

    ``data_to_idx``:
        A function mapping data to indices.

    ``log_cost_func``:
        A function that computes the log-cost of a belief state.

    ``look_ahead``:
        The number of steps to look ahead; the belief tree has depth
        ``2*look_ahead + 1``.

    ``hmm``:
        The HMM used to compute the belief states (deep-copied at init).

    ``root_belief``:
        The belief state used to seed the tree, or None for the HMM default.

    ``tree``:
        The tree of belief states.
    """

    def __init__(self, possible_perms, hmm, log_cost_func, look_ahead, data_to_idx=None, root_belief=None, initialize_tree=True, save_history=False):
        r"""Initialize the MinTreePolicy.

        :param possible_perms: The allowable permutations.
        :param hmm: The HMM used to compute the belief states.
        :param log_cost_func: The function used to compute the log-cost of a
            belief state.
        :param int look_ahead: Number of steps to look ahead in tree calculation
        :param data_to_idx: The function to convert incoming data to indices
            corresponding to data. Defaults to ``lambda data: data.long()``.
        :param HMMBeliefState root_belief: The initial belief to seed the belief
            tree with.
        :param bool initialize_tree: Whether to call the initialize_tree method
            as a part of initialization of the object. If ``True``, will call
            initialize_tree() with no arguments. To specify arguments, pass
            ``False`` to this flag and call initialize_tree() separately.
        :param save_history: Indicates whether to save the history of the
            computation.
        """
        super().__init__(possible_perms, save_history=save_history)
        if data_to_idx is None:
            def data_to_idx(x):
                return x.long()
        self.data_to_idx = data_to_idx
        self.log_cost_func = log_cost_func
        # Deep copy so later mutation of the caller's HMM cannot change beliefs.
        self.hmm = deepcopy(hmm)
        self.look_ahead = look_ahead
        self.root_belief = root_belief
        if initialize_tree:
            self.initialize_tree()
        else:
            self.tree = None  # type: HMMBeliefTree | None

    def initialize_tree(self, look_ahead=None, root_belief=None, data_len=None):
        r"""Initializes the belief tree.

        Computes the belief tree, and saves it in the attribute :py:attr:`tree`.

        :param look_ahead: The number of steps to look ahead. The resulting
            tree has depth ``2*look_ahead + 1``. If given, overwrites
            ``self.look_ahead``.
        :param root_belief: The root belief state. If None, defaults to the
            initial belief state of the HMM. If given, overwrites
            ``self.root_belief``.
        :param data_len: The length of the data to be observed. If None,
            defaults to 1, so that the dimension will broadcast with later
            operations.
        :return: None
        """
        if look_ahead is not None:
            self.look_ahead = look_ahead
        if root_belief is not None:
            self.root_belief = root_belief
        self.tree = HMMBeliefTree(self.hmm, self.possible_perms, self.look_ahead, root_belief=self.root_belief, data_len=data_len)
        if self.save_history:
            # Only the first tree built after a reset is recorded.
            if b'initial_tree' not in self.calc_history:
                self.calc_history[b'initial_tree'] = deepcopy(self.tree)
            else:
                warnings.warn('Initial tree already exists, so a new one will '
                              'not be saved. Did you remember to call reset()?')

    def reset(self, initialize_tree=True, save_history=False):
        r"""Resets the MinTreePolicy.

        This method resets the tree to None, and the history of the computation.

        :param initialize_tree: Whether to initialize the tree. If ``False``,
            tree is set to ``None``.
        :param save_history: Whether to save the history of the computation the
            next time the policy is used.
        :return: None
        """
        super().reset(save_history=save_history)
        if initialize_tree:
            self.initialize_tree()
        else:
            self.tree = None

    def calculate_perm(self, data: torch.Tensor) -> (torch.Tensor, dict):
        r"""Generates permutations that minimize the cost.

        This method updates the tree on-line. After receiving the data, it
        prunes the tree, expands the leaves, then recomputes the log-cost of the
        root and the corresponding permutations. It then returns the minimum
        cost permutation, and the corresponding cost.

        :param data: The data to compute the next permutation for.
        :return: The permutation that minimizes the cost, and a dict containing
            keys

            ``b'log_costs'``: The tree of costs at each stage.

            ``b'perm_idx_tree'``: The index of the cost minimizing
                permtuation. This is a tree of

                .. math::

                    \operatorname{argmin}_{\sigma^n}c\left( Y_k^n | \sigma^n y^{k-1} \right)

                for a cost function :math:`c`, computed from

                .. math::

                    \operatorname{min}_{\sigma^n}\mathbb{E}_{Y^n_k|\sigma^n, y^{k-1}}
                    \left[c\left( Y^n_k | \sigma^n, y^{k-1} \right)\right]
                    = \min_{\sigma_k}\mathbb{E}_{Y_k|\sigma^k, y^{k-1}}\left[ \cdots
                    \min_{\sigma_{n-1}}\mathbb{E}_{Y_{n-1}|\sigma^{n-1}, y^{n-2}}
                    \left[\min_{\sigma_n}\mathbb{E}_{Y_n|\sigma^n, y^{n-1}}
                    \left[
                    c\left( Y^n_k | \sigma^n y^{k-1} \right)
                    \right]\right] \cdots\right]

                That is, these are the indices of the permutations that
                minimize the expected cost, if the process were to terminate
                in ``depth`` steps.

            ``b'penultimates'``: These are the newly computed beliefs
                obtained from transitioning with each possible permutation.
                Has dimensions -1: s_k, -2: s_0, 0: batch. All other
                dimensions are for the "tree" dimensions. That is, for the
                ``i``th run, for a sequence of permutation indices
                ``sigma[0]`` to ``sigma[depth-1]`` and a sequence of
                observation indices ``o[0]`` to ``o[depth-2]``, the belief
                state :math:`p(s_0, s_k|y^k \sigma^k)` that would have been
                obtained is given by::

                    calc_dict[b'penultimates'][i, o[0], sigma[0], ...,
                        o[depth-2], sigma[depth-1], s_0, s_k]

                This is really the penultimate layer of the tree of belief
                states that is recomputed.
                Instead of the whole belief tree at each stage, This along
                with ``b'leaves'`` is returned. From the returned
                information, one should be able to reconstruct the whole
                belief tree at each stage.

            ``b'leaves'``: This is the newly computed final layer of the
                belief tree. The cost function is computed on this layer,
                then passed up the tree to compute the permutations.
        """
        if not self.tree:
            raise ValueError("Must call initialize_tree() first.")
        # The -3 axis of the root logits is the batch axis of the tree.
        tree_len = self.tree.beliefs[0].logits.shape[-3]
        if data.shape[0] != tree_len:
            if tree_len == 1:
                # Fresh tree with singleton batch: broadcast to the data batch.
                self.tree.broadcast_to_length(data.shape[0])
            else:
                raise ValueError("Must call reset() first.")
        # Discard the branches incompatible with the observed data, then grow
        # one more layer so the look-ahead depth is maintained.
        self.tree.prune_tree(self.data_to_idx(data))
        self.tree.grow()
        perm_idx_tree, log_costs = self.tree.perm_idxs_from_log_cost(self.log_cost_func, return_log_costs=True)
        perm_idx = perm_idx_tree.perm_idxs[0]
        perm = self.possible_perms[perm_idx]
        # Commit to the chosen permutation by pruning the other branches.
        self.tree.prune_tree(perm_idx)
        return perm, {
            b'log_costs': log_costs,
            b'perm_idx_tree': perm_idx_tree,
            # rollaxis moves the batch axis (-3) to the front.
            b'penultimates': torch.tensor(np.rollaxis(self.tree.beliefs[-2].logits.numpy(), -3)),
            b'leaves': torch.tensor(np.rollaxis(self.tree.beliefs[-1].logits.numpy(), -3))
        }

    def penultimates_from_sequence(self, data, perms, event_dims=0):
        r"""From a sequence of data and corresponding permutations, computes the
        belief trees that would have been obtained, had those choices been
        made.

        :param data: The hypothetical data to compute beliefs for.
        :param perms: The hypothetical permtuations to compute beliefs for.
        :param event_dims: The number of dimensions of the data that correspond
            to event dimensions.
        :return: A list of trees.
        :raises ValueError: If tree is not initialized.
        """
        # Replay the fixed permutation sequence with history saving on, then
        # read the recorded penultimate layers back out of the history.
        fixed = TreeFixedPolicy(self.possible_perms, self.hmm, perms, self.look_ahead, self.data_to_idx, root_belief=self.root_belief, save_history=True)
        _ = fixed.get_perms(data, event_dims=event_dims)
        d = fixed.calc_history
        return d[b'penultimates']
class TreeFixedPolicy(MinTreePolicy):
    r"""A :py:class:`MinTreePolicy` that returns a fixed, data-independent
    sequence of permutations while still maintaining the belief tree.

    Useful for replaying a hypothetical sequence of permutations to recover
    the belief states that would have been computed along the way.
    """

    def __init__(self, possible_perms, hmm, perms, look_ahead, data_to_idx=None, root_belief=None, initialize_tree=True, save_history=False):
        r"""Initializes the policy.

        Needs the ``perms`` argument, that is the permutations that will be
        returned, independent of the data.

        :param torch.Tensor possible_perms: The allowed permutations.
        :param perm_hmm.models.hmms.PermutedDiscreteHMM hmm:
            The HMM used to calculate the belief states.
        :param torch.Tensor perms: The fixed sequence of permutations to be returned.
        :param int look_ahead: The number of steps to look ahead when computing
            the belief tree. The belief tree will have height 2*look_ahead+1.
        :param data_to_idx: The function to convert incoming data to indices
            corresponding to data. Defaults to ``lambda data: data.long()``.
        :param HMMBeliefState root_belief: The belief state to seed the belief
            tree with. Defaults to the initial state of the HMM.
        :param bool initialize_tree: Whether to call the initialize_tree method
            as a part of initialization of the object. If ``True``, will call
            initialize_tree() with no arguments. To specify arguments, pass
            ``False`` to this flag and call initialize_tree() separately.
        :param bool save_history: Whether to save the computation history.
        """
        # The log-cost function is irrelevant for a fixed sequence, so a
        # trivial placeholder is passed to the parent.
        super().__init__(
            possible_perms,
            hmm,
            lambda x: None,
            look_ahead,
            data_to_idx=data_to_idx,
            root_belief=root_belief,
            initialize_tree=initialize_tree,
            save_history=save_history
        )
        self.step = 0
        self.perms = perms
        # Precompute the index of each fixed permutation within possible_perms.
        self.perm_idxs = perm_idxs_from_perms(self.possible_perms, self.perms)

    def reset(self, initialize_tree=True, save_history=False):
        r"""Resets the policy.

        Resets the step to 0, and the computation history.

        :param bool initialize_tree: Whether to initialize the belief tree.
        :param bool save_history: Whether to save the computation history.
        :return: None
        """
        super().reset(
            initialize_tree=initialize_tree,
            save_history=save_history
        )
        self.step = 0

    def calculate_perm(self, data: torch.Tensor) -> (torch.Tensor, dict):
        r"""Returns the next permutation of the fixed sequence, updating the
        belief tree with the observed data along the way.

        :param data: The observed data for this step, shape ``(batch_len,)``
            plus event dimensions.
        :return: The fixed permutation for the current step (broadcast over
            the batch), and a dict with the recomputed ``b'penultimates'``
            and ``b'leaves'`` layers of the belief tree.
        :raises ValueError: If the tree is not initialized.
        """
        if not self.tree:
            raise ValueError("Must call initialize_tree() first.")
        self.tree.prune_tree(self.data_to_idx(data))
        self.tree.grow()
        perm_idx = self.perm_idxs[self.step]
        perm_idx = perm_idx.expand(data.shape[:1] + perm_idx.shape)
        perm = self.perms[self.step, :]
        perm = perm.expand(data.shape[:1] + perm.shape)
        self.tree.prune_tree(perm_idx)
        self.step += 1
        return perm, {
            # rollaxis moves the batch axis (-3) to the front.
            b'penultimates': torch.tensor(np.rollaxis(self.tree.beliefs[-2].logits.numpy(), -3)),
            b'leaves': torch.tensor(np.rollaxis(self.tree.beliefs[-1].logits.numpy(), -3))
        }
def log_entropy_of_expanded(logits, n_outcomes):
    r"""Log-entropy of the initial physical state for an expanded belief.

    In the expanded state space model, a belief state is a joint distribution
    :math:`\mathbb{P}((l_0, y_0), (l_k, y_k)|y^{k-1})` over pairs of physical
    states and outcomes. This function marginalizes the initial outcome
    :math:`y_0` out of the first axis and returns
    :math:`\log H(L_0|y^{k-1})`.

    :param logits: :math:`\mathbb{P}((l_0, y_0), (l_k, y_k)|y^{k-1})`, with
        dimensions -1: (l_k, y_k), -2: (l_0, y_0).
    :param n_outcomes: Number of possible outcomes; the -2 axis is unflattened
        into (l_0, y_0) using this size before marginalizing over y_0.
    :return: The log of the entropy of the initial physical state.
    """
    # Split the (l_0, y_0) axis into separate l_0 and y_0 axes.
    unflattened = logits.reshape(
        logits.shape[:-2] + (-1, n_outcomes) + logits.shape[-1:])
    # Marginalize over y_0 in log space.
    marginal = unflattened.logsumexp(-2)
    return cf.log_initial_entropy(marginal)
class ExpandedEntPolicy(MinTreePolicy):
    r"""Minimum-entropy policy for the expanded state space model.

    When the next "physical" state is allowed to depend on the previous
    outcome, the model lives on an expanded state space: the cartesian product
    of the physical states with the possible outcomes. In that model the
    initial state emits no observation, so both the seed belief state and the
    cost function differ from
    :py:class:`~perm_hmm.policies.min_ent.MinEntropyPolicy`; the cost used
    here is the conditional entropy of the initial physical state.

    .. seealso:: :py:class:`~perm_hmm.policies.min_ent.MinEntropyPolicy`
    """

    def __init__(self, possible_perms, hmm, look_ahead=1,
                 data_to_idx=None, trivial_obs=None, root_belief=None,
                 initialize_tree=True, save_history=False):
        r"""Initializes the policy.

        :param possible_perms: The allowable permutations.
        :param hmm: The HMM used to compute likelihoods.
        :param look_ahead: The number of steps to look ahead.
        :param data_to_idx: The function to convert data to indices.
        :param trivial_obs: The observation :math:`y_0` such that
            :math:`\mathbb{P}((l_0, y_0')) = \delta_{y_0, y_0'}\mathbb{P}(l_0)`.
        :param HMMBeliefState root_belief: The initial belief to seed the belief
            tree with. Defaults to the state

            .. math::

                p(s_0, s_k) = A(s_k|s_0)\pi(s_0)

            where :math:`\pi` is the initial distribution of the HMM and
            :math:`A` is the transition matrix of the HMM. This is done because
            in the expanded state space model, the initial state does not emit
            an observation.
        :param bool initialize_tree: Whether to call the initialize_tree method
            during construction. If ``True``, calls initialize_tree() with no
            arguments; pass ``False`` and call it yourself to customize.
        :param save_history: Indicates whether to save the history of the
            calculation.
        """
        def _expanded_entropy(logits):
            # Number of outcomes is read from the HMM at call time.
            support = hmm.enumerate_support(expand=False).squeeze(-1)
            return log_entropy_of_expanded(logits, n_outcomes=support.shape[-1])
        self.trivial_obs = trivial_obs
        if root_belief is None:
            root_belief = HMMBeliefState.from_skipfirsthmm(
                hmm,
                trivial_obs=trivial_obs
            )
        super().__init__(
            possible_perms,
            hmm,
            _expanded_entropy,
            look_ahead,
            data_to_idx=data_to_idx,
            root_belief=root_belief,
            initialize_tree=initialize_tree,
            save_history=save_history
        )
class MinEntPolicy(MinTreePolicy):
    r"""A :py:class:`MinTreePolicy` whose cost function is the conditional
    entropy of the initial state,
    :py:func:`perm_hmm.log_cost.log_initial_entropy`.
    """

    def __init__(self, possible_perms, hmm, look_ahead=1, data_to_idx=None, root_belief=None, initialize_tree=True, save_history=False):
        r"""Initializes the policy; parameters as in
        :py:meth:`MinTreePolicy.__init__`, with the cost function fixed to the
        log initial-state entropy.
        """
        super().__init__(
            possible_perms,
            hmm,
            cf.log_initial_entropy,
            look_ahead,
            data_to_idx=data_to_idx,
            root_belief=root_belief,
            initialize_tree=initialize_tree,
            save_history=save_history,
        )
def main():
    """Small demo: run a MinTreePolicy with a Renyi-entropy cost on a random
    two-state HMM over all length-3 observation strings, and print the
    resulting permutations."""
    n_states = 2
    n_steps = 3
    demo_hmm = random_phmm(n_states)
    # eye(2) rows are [1, 0] and [0, 1]: the swap and the identity.
    allowed_perms = torch.eye(n_states, dtype=torch.long)
    policy = MinTreePolicy(
        allowed_perms,
        demo_hmm,
        lambda logits: cf.log_renyi_entropy(logits, 2.0),
        2,
    )
    observations = all_strings(n_steps)
    print(policy.get_perms(observations))
# Run the demo when executed as a script.
if __name__ == '__main__':
    main()
| 18,477 | 42.683215 | 153 | py |
perm_hmm | perm_hmm-master/perm_hmm/policies/policy.py | """This module contains the abstract class
:py:class:`~perm_hmm.policies.policy.PermPolicy`. This class provides
boilerplate to implement a policy for the permutation-based HMM.
"""
import warnings
import torch
from perm_hmm.util import flatten_batch_dims
class PermPolicy(object):
    """
    This is an abstract class that is used to select permutations. The get_perm
    method is called in-line when sampling with PermutedDiscreteHMM. The
    get_perms method uses the get_perm method to compute all the permutations
    that would be chosen for a whole sequence.

    The boilerplate is to manage the shape of the incoming data, and to manage
    the history of the calculations used to compute the permutations. Because
    the calculation of permutations can be stateful, it is also useful to be
    able to "reset" the state of the calculation, which is done by the
    :py:meth:`~perm_hmm.policies.policy.PermPolicy.reset` method.

    Subclasses should implement the following methods:

    reset:
        Resets the state of the ``PermPolicy``. This method should be called
        after the ``PermPolicy`` is used to compute a sequence of
        permutations. Subclasses should call the ``reset`` method of the parent
        class before cleaning up their own state.

    calculate_perm:
        Computes the permutation to apply, given the data observed. Should
        return both the permutation and the relevant parts of the calculation
        used to compute it, as a dictionary of tensors. If none are relevant,
        the return value should be the empty dictionary.

    .. seealso::
        :py:class:`~perm_hmm.policies.rotator_policy.RotatorPolicy`
        for an example of subclassing.

    The attributes of the ``PermPolicy`` are:

    ``possible_perms``:
        A tensor of shape ``(n_perms, n_states)``, that contains the allowable
        permutations.

    ``calc_history``:
        A dictionary of tensors (or lists, for non-tensor values), containing
        the calculation history.

    ``perm_history``:
        A tensor of shape ``batch_shape + (n_steps, n_states)``, containing
        the permutations applied to the states thus far, or None before any
        permutation has been recorded.
    """

    def __init__(self, possible_perms, save_history=False):
        """Initializes the PermPolicy.

        Should be called by the subclass constructor.

        :param possible_perms: The allowable permutations, shape
            ``(n_perms, n_states)``.
        :param save_history: Indicates whether to save the calculation
            and permutation history.
        :raises ValueError: If any row of ``possible_perms`` is not a
            permutation of ``[0, ..., n_states - 1]``.
        """
        n_perms, n_states = possible_perms.shape
        # Each row must be a permutation of [0, ..., n_states - 1]; sorting
        # each row and comparing against arange checks exactly that.
        if not (possible_perms.long().sort(-1).values ==
                torch.arange(n_states, dtype=torch.long).expand(
                    (n_perms, n_states)
                )).all():
            raise ValueError("The input permutations are not permutations of "
                             "the integers [0, ..., n_states]")
        self.possible_perms = possible_perms
        self.calc_history = {}
        self.perm_history = None
        self.save_history = save_history

    def _add_to_calc_history(self, calc_dict, shape):
        """Adds the step of the calculation to the calculation history.

        Tensor values are reshaped to carry the batch shape and concatenated
        along a new time dimension; non-tensor values (anything for which
        reshape fails) are accumulated in per-key lists instead.

        :param calc_dict: The dictionary containing the step of the calculation
            history.
        :param shape: The batch shape the flattened leading dimension should be
            restored to.
        :return: None
        """
        for k, v in calc_dict.items():
            try:
                v = v.unsqueeze(1)
                v = v.reshape(shape + v.shape[1:])
            except (RuntimeError, ValueError, AttributeError, TypeError):
                # Not a reshapeable tensor (e.g. a tree object): keep a list.
                if k in self.calc_history:
                    self.calc_history[k].append(v)
                else:
                    self.calc_history[k] = [v]
            else:
                if k in self.calc_history:
                    # Concatenate along the time dimension, which sits right
                    # after the batch dimensions.
                    self.calc_history[k] = torch.cat(
                        (self.calc_history[k], v),
                        dim=len(shape),
                    )
                else:
                    self.calc_history[k] = v

    def _add_to_perm_history(self, perm, shape):
        """Adds the permutation used to the permutation history.

        :param perm: The permutation used, shape ``(batch_len, n_states)``.
        :param shape: The batch shape the flattened leading dimension should be
            restored to.
        :return: None
        """
        # Insert a time dimension, then restore the batch shape.
        perm = perm.unsqueeze(-2)
        perm = perm.reshape(shape + perm.shape[1:])
        if self.perm_history is None:
            self.perm_history = perm
        else:
            self.perm_history = torch.cat(
                (self.perm_history, perm),
                dim=-2,
            )

    def calculate_perm(self, data: torch.Tensor) -> (torch.Tensor, dict):
        """This method should be implemented by subclasses.

        Given the data, this method should return the permutation to apply,
        along with the relevant parts of the calculation used to compute it,
        in the form of a dictionary of tensors. If none are relevant, the second
        return value should be the empty dictionary.

        :param data: The data observed. The dimensions will always be
            ``(batch_len,) + event_shape``.
        :return: The permutation to apply, and the relevant parts of the
            calculation used to compute it, as a dictionary of tensors.
            The permutation should be a tensor of shape
            ``(batch_len, num_states)``, and the dictionary should contain
            tensors of shape ``(batch_len,) + arbitrary``.
        :raises NotImplementedError: Always; subclasses must override.
        """
        raise NotImplementedError

    def get_perm(self, data: torch.Tensor, event_dims=0):
        """Takes an input of data from a single step, and returns a
        permutation.

        If self.save_history is True, the calculation and permutation history
        will be saved to the attributes self.calc_history and self.perm_history
        respectively.

        :param torch.Tensor data: Data from the HMM.
            shape ``batch_shape + event_shape``
        :param int event_dims: Number of event dimensions. Needed to distinguish
            the batch dimensions from the event dimensions. Should be equal to
            len(event_shape).
        :return: The permutation to be applied at the next step.
            shape ``batch_shape + (num_states,)``
        """
        data, shape = flatten_batch_dims(data, event_dims=event_dims)
        perm, calc_dict = self.calculate_perm(data)
        # The permutation history is always recorded; the (potentially large)
        # calculation history only on request.
        self._add_to_perm_history(perm, shape)
        if self.save_history:
            self._add_to_calc_history(calc_dict, shape)
        perm = perm.reshape(shape + (perm.shape[-1],))
        return perm

    def reset(self, save_history=False):
        """Resets the calculation history.

        Subclasses should call this method in their reset methods.

        :param save_history: Indicates whether to save the permutation and
            calculation histories the next time the policy is used to compute
            permutations.
        :return: None
        """
        self.perm_history = None
        self.save_history = save_history
        self.calc_history = {}

    def get_perms(self, data, event_dims=0):
        r"""
        Given a run of data, returns the permutations which would be applied.

        This should be used to precompute the permutations for a given model
        and given data sequence.

        :param torch.Tensor data: The sequence of data to compute the
            permutations for, of shape
            ``batch_shape + (time_len,) + event_shape``.
        :param int event_dims: Number of event dimensions. Needed to distinguish
            the batch dimensions from the event dimensions. Should be equal to
            len(event_shape).
        :returns: A tensor containing the permutations that would have been
            applied, given the input data, of shape
            ``batch_shape + (time_dim, num_states)``.
        """
        # BUGFIX: perm_history is a tensor or None. The previous truthiness
        # test (`if self.perm_history:`) raised a RuntimeError for tensors
        # with more than one element; compare against None explicitly.
        if self.perm_history is not None:
            warnings.warn("The perm_history is not empty. "
                          "The returned perms will include these, "
                          "maybe you meant to call reset() before "
                          "calling this function?")
        shape = data.shape[:len(data.shape) - event_dims]
        max_t = shape[-1]
        for i in range(max_t):
            _ = self.get_perm(
                data[(..., i) + (slice(None),) * event_dims],
                event_dims=event_dims,
            )
        return self.perm_history
| 8,293 | 39.458537 | 80 | py |
perm_hmm | perm_hmm-master/perm_hmm/policies/rotator_policy.py | """This is an example of a very simple PermPolicy.
The :py:class:`~perm_hmm.policies.policy.PermPolicy` is a class that is
used to select a permutation based on data seen thus far. It takes care of some
boilerplate, but should be subclassed to implement the actual selection
algorithm, and done so in a particular way. This example is meant to demonstrate
how to subclass the :py:class:`~perm_hmm.policies.policy.PermPolicy` class
appropriately.
The :py:class:`~perm_hmm.policies.rotator_policy.RotatorPolicy` is a
:py:class:`~perm_hmm.policies.policy.PermPolicy` that rotates the states,
independently of the data seen thus far.
"""
import torch
from perm_hmm.policies.policy import PermPolicy
def cycles(num_states):
    """Return every cyclic shift of ``range(num_states)`` as rows of a tensor.

    Row ``i`` is the permutation shifting each state by ``i``; entry
    ``[i, j]`` equals ``(i + j) % num_states``.

    :param int num_states: The number of states in the cycle.
    :return: A ``(num_states, num_states)`` long tensor containing the cycles.
    """
    offsets = torch.arange(num_states)
    # Broadcasting a column of row offsets against a row of positions yields
    # entry [i, j] == i + j, reduced modulo num_states.
    return (offsets.unsqueeze(-1) + offsets.unsqueeze(0)) % num_states
class RotatorPolicy(PermPolicy):
    """A minimal example of a PermPolicy, for demonstration of subclassing the
    PermPolicy class.

    Regardless of the data seen, it always returns the cycle that shifts every
    state by 1.

    Attributes:

    ``num_states``:
        The number of states in the HMM.

    ``index``:
        The number of permutations applied, modulo num_states. Not needed for
        the selection itself; it exists purely to demonstrate stateful
        policies and the reset method.
    """

    def __init__(self, hmm, save_history=False):
        r"""Initializes the RotatorPolicy.

        Reads the number of states from the HMM, builds all cyclic shifts of
        those states, and registers them as the possible permutations.

        .. seealso:: :py:meth:`~perm_hmm.policies.policy.PermPolicy.__init__`

        :param perm_hmm.models.hmms.PermutedDiscreteHMM hmm: An HMM.
        :param save_history: Whether to save the computation history. If
            specified, the history of the permutations is saved in the
            attribute ``.calc_history``.
        """
        self.index = 0
        self.num_states = hmm.initial_logits.shape[0]
        super().__init__(cycles(self.num_states), save_history=save_history)

    def reset(self, save_history=False):
        """Reinitializes the policy for reuse.

        Permutation selection may be stateful in general, so a policy must be
        reset before being reused. The parent's reset clears the permutation
        and calculation histories; this subclass additionally zeroes its step
        counter.

        :param save_history: Indicates whether to save the computations
            involved in selecting the permutations.
        :return: None
        """
        super().reset(save_history=save_history)
        self.index = 0

    def calculate_perm(self, data: torch.Tensor) -> (torch.Tensor, dict):
        """Selects the permutation for the current step.

        Ignores the data entirely and returns the shift-by-one cycle for every
        run in the batch, along with an empty calculation dictionary.

        :param data: The data at a single step to compute the permutation for.
        :return: The shift-by-one permutation repeated over the batch, and an
            empty dict.
        """
        self.index = (self.index + 1) % self.num_states
        batch_len = data.shape[0]
        shift_by_one = self.possible_perms[1]
        return shift_by_one.repeat((batch_len, 1)), {}
| 3,629 | 38.456522 | 80 | py |
perm_hmm | perm_hmm-master/perm_hmm/training/interrupted_training.py | r"""Trains the interrupted classifier.
The :py:class:`~perm_hmm.classifiers.interrupted.InterruptedClassifier` has a
parameter that dictates when the likelihood has risen to the point that we can
conclude the inference early. This parameter needs to be learned, which is what
this module provides methods for.
"""
import numpy as np
import torch
from perm_hmm.classifiers.interrupted import IIDBinaryIntClassifier, IIDInterruptedClassifier
from perm_hmm.postprocessing import ExactPostprocessor, EmpiricalPostprocessor
# from perm_hmm.loss_functions import binary_zero_one, log_binary_zero_one
def exact_train_ic(ic: IIDInterruptedClassifier, all_data, log_joint, num_ratios=20):
    """
    Train the interrupted classifier using the exact chances of the data occurring.

    The interrupted classifier takes as input a single parameter, the threshold
    likelihood ratio at which to terminate the run and conclude a
    classification. This performs a brute-force grid search over ``num_ratios``
    equally spaced thresholds, and leaves ``ic.ratio`` set to the minimizer.

    :param bayes_perm_hmm.interrupted.InterruptedClassifier ic: To be trained.
        Its ``ratio`` attribute is mutated in place.
    :param torch.Tensor all_data: all possible runs of data.
    :param torch.Tensor log_joint: Corresponding joint log probability of the data.
        shape (num_states, len(all_data))
    :param num_ratios: number of points to perform the brute force search on.
    :return: The minimum log misclassification rate over the searched domain.
        (Note: this is a log rate, as produced by
        ``ExactPostprocessor.log_misclassification_rate``.)
    """
    spaced_ratios = torch.arange(num_ratios, dtype=torch.float)
    misclass_rates = torch.zeros(num_ratios, dtype=torch.float)
    for j in range(num_ratios):
        ic.ratio = spaced_ratios[j]
        interrupted_results = ic.classify(
            all_data,
        )
        iep = ExactPostprocessor(
            log_joint,
            interrupted_results,
        )
        misclass_rates[j] = iep.log_misclassification_rate()
    # Tensor.argmin replaces the previous numpy round-trip
    # (torch.tensor(np.argmin(...numpy(), -1))); the resulting index tensor is
    # identical, and log is monotonic so minimizing the log rate minimizes the
    # rate itself.
    argmin_rate = misclass_rates.argmin(-1)
    min_rate = misclass_rates[argmin_rate]
    ic.ratio = spaced_ratios[argmin_rate]
    return min_rate
def train_ic(ic: IIDInterruptedClassifier, training_data, ground_truth, num_ratios=20):
    """Trains the interrupted classifier on empirical data.

    Brute-force searches ``num_ratios`` candidate threshold ratios, scoring
    each by the empirical misclassification rate on ``training_data``, and
    leaves ``ic.ratio`` set to the best candidate.

    :param ic: The :py:class:`IIDInterruptedClassifier` to train in place.
    :param training_data: Data to train on.
    :param ground_truth: The true initial states that generated the data.
    :param num_ratios: Number of grid points for the brute force search.
    :return: The minimal empirical misclassification rate found on the grid.
    """
    candidate_ratios = torch.arange(num_ratios, dtype=torch.float)
    rates = torch.zeros(num_ratios, dtype=torch.float)
    for idx, ratio in enumerate(candidate_ratios):
        ic.ratio = ratio
        classifications = ic.classify(training_data)
        postprocessor = EmpiricalPostprocessor(ground_truth, classifications)
        # The postprocessor reports a dict keyed by bytes; b"rate" holds the
        # scalar misclassification rate.
        rates[idx] = postprocessor.misclassification_rate()[b"rate"]
    best = torch.tensor(np.argmin(rates.numpy(), -1))
    ic.ratio = candidate_ratios[best]
    return rates[best]
def train_binary_ic(bin_ic: IIDBinaryIntClassifier, training_data, ground_truth, dark_state, bright_state, num_ratios=20):
    """Trains the binary interrupted classifier on empirical data.

    Grid-searches the two likelihood-ratio thresholds (bright and dark) over
    a ``num_ratios`` x ``num_ratios`` grid, and leaves ``bin_ic`` configured
    with the pair that minimizes the empirical misclassification rate.

    :param bin_ic: An IIDBinaryIntClassifier whose two threshold parameters
        are trained in place.
    :param torch.Tensor training_data: float tensor.
        Data to train the classifier on.
        shape ``(num_samples, time_dim)``
    :param torch.Tensor ground_truth: int tensor.
        Ground truth from an HMM.
        shape ``(num_samples,)``
    :param dark_state: int, Indicates which state is the dark state. Needed to
        interpret ground_truth.
    :param bright_state: int, Indicates which state is the bright state.
        Needed to interpret ground_truth.
    :param int num_ratios: sets the grid size to perform the brute force
        search for the minimal misclassification rate on.
    :raises ValueError: if ``training_data`` is not two-dimensional.
    """
    try:
        num_samples, max_t = training_data.shape
    except ValueError as e:
        raise ValueError(
            "Training data must have shape (num_samples, max_t)") from e
    ratio_grid = torch.arange(num_ratios, dtype=torch.float)
    rate_table = torch.empty((num_ratios, num_ratios), dtype=torch.float)
    for row, bright in enumerate(ratio_grid):
        for col, dark in enumerate(ratio_grid):
            bin_ic.bright_ratio = bright
            bin_ic.dark_ratio = dark
            classifications = bin_ic.classify(training_data, verbosity=0).int()
            postprocessor = EmpiricalPostprocessor(ground_truth, classifications)
            rate_table[row, col] = postprocessor.misclassification_rate()[b"rate"]
    # Recover the 2-D grid coordinates of the flat argmin.
    best_row, best_col = np.unravel_index(np.argmin(rate_table.numpy()), rate_table.shape)
    bin_ic.bright_ratio = ratio_grid[best_row]
    bin_ic.dark_ratio = ratio_grid[best_col]
def exact_train_binary_ic(bin_ic: IIDBinaryIntClassifier, all_data, log_joint, num_ratios=20):
    """Trains the binary interrupted classifier against exact probabilities.

    Grid-searches the two likelihood-ratio thresholds (bright and dark) over
    a ``num_ratios`` x ``num_ratios`` grid, scoring each pair by the exact
    log misclassification rate, and leaves ``bin_ic`` configured with the
    best pair.

    :param bin_ic: IIDBinaryIntClassifier. It has two parameters that this
        function trains in place, which are the two thresholds for
        terminating the run.
    :param torch.Tensor all_data: All possible runs of data.
        shape ``(num_runs, steps)``
    :param torch.Tensor log_joint: Corresponding log joint likelihoods.
        shape ``(num_states, num_runs)``
    :param int num_ratios: sets the grid size to perform the brute force
        search for the minimal misclassification rate on.
    """
    ratio_grid = torch.arange(num_ratios, dtype=torch.float)
    log_rate_table = torch.empty((num_ratios, num_ratios), dtype=torch.float)
    for row, bright in enumerate(ratio_grid):
        for col, dark in enumerate(ratio_grid):
            bin_ic.bright_ratio = bright
            bin_ic.dark_ratio = dark
            classifications = bin_ic.classify(all_data).int()
            postprocessor = ExactPostprocessor(log_joint, classifications)
            log_rate_table[row, col] = postprocessor.log_misclassification_rate()
    # Recover the 2-D grid coordinates of the flat argmin.
    best_row, best_col = np.unravel_index(np.argmin(log_rate_table.numpy()), log_rate_table.shape)
    bin_ic.bright_ratio = ratio_grid[best_row]
    bin_ic.dark_ratio = ratio_grid[best_col]
| 6,869 | 40.636364 | 134 | py |
perm_hmm | perm_hmm-master/perm_hmm/models/hmms.py | """
An adaptation of the `pyro.distributions.DiscreteHMM`_ class.
The additions are to the log_prob method (which is incorrect as written in the
pyro package), and the ability to sample from the model, functionality which is
not included in the `pyro`_ model.
.. _pyro.distributions.DiscreteHMM: https://docs.pyro.ai/en/stable/distributions.html?#pyro.distributions.DiscreteHMM
.. _pyro: https://docs.pyro.ai/en/stable/
"""
from operator import mul
from functools import reduce
import torch
import pyro
import pyro.distributions as dist
import pyro.distributions.hmm
from pyro.distributions.hmm import _sequential_logmatmulexp
from pyro.distributions.util import broadcast_shape
from perm_hmm.util import wrap_index
from perm_hmm.return_types import HMMOutput
from perm_hmm.policies.policy import PermPolicy
class DiscreteHMM(pyro.distributions.hmm.DiscreteHMM):
    """A discrete hidden Markov model that generates data.

    Adds a correct log_prob method, a vectorized sample method,
    and a method to compute the posterior log initial state distribution.

    Convention: the initial state emits the first observation directly
    (a transition happens between observations, not before the first one).
    """
    def __init__(self, initial_logits, transition_logits, observation_dist,
                 validate_args=None):
        """Initializes the HMM.

        Just passes to the superclass initialization
        method with a check for the presence of the ``_param`` attribute in the
        ``observation_dist``.

        :raises ValueError: If the :attr:`observation_dist` doesn't have a
            :attr:`.param` attribute.
        """
        if not hasattr(observation_dist, '_param'):
            raise ValueError("The observation distribution should have a "
                             "'._param' attribute. Try reencoding your "
                             "distribution as a pyro.distributions.Categorical "
                             "object.")
        super().__init__(initial_logits, transition_logits, observation_dist,
                         validate_args=validate_args)
        # Mirror the observation distribution's enumerability so callers can
        # query it directly on the HMM.
        self.has_enumerate_support = self.observation_dist.has_enumerate_support
    def enumerate_support(self, expand=True):
        # The support of the HMM's outputs is exactly the support of the
        # observation distribution.
        return self.observation_dist.enumerate_support(expand)
    def posterior_log_initial_state_dist(self, value):
        """Computes the posterior log initial state distribution.

        This computation is similar to the forward algorithm.

        :param torch.Tensor value: The observed data.
            shape ``(batch_shape, time_dim)``
        :returns: The posterior log initial state distribution.
            shape ``(batch_shape, state_dim)``
        :raises ValueError: if the transition matrices are of the wrong size.
        """
        if value.shape[-1] == 0:
            # No data observed: the posterior is just the prior.
            return self.initial_logits
        if value.shape[-1] == 1:
            # Single observation: plain Bayes' rule, no transitions involved.
            observation_logits = self.observation_dist.log_prob(value)
            result = observation_logits + self.initial_logits
            result -= result.logsumexp(-1, keepdim=True)
            return result
        # Insert a state dimension so the data broadcasts against the
        # per-state parameters of the observation distribution.
        value = value.unsqueeze(-1 - self.observation_dist.event_dim)
        value = value.float()
        observation_logits = self.observation_dist.log_prob(value)
        # head: log-likelihoods of the first observation (emitted by the
        # initial state); tail: log-likelihoods of all later observations.
        head = observation_logits[..., 0, :]
        tail = observation_logits[..., 1:, :]
        tail = tail.unsqueeze(-2)
        if len(self.transition_logits.shape) == 2:
            # Homogeneous transition matrix: fold each later observation into
            # its transition step, then contract over time in log space.
            result = self.transition_logits + tail
            result = _sequential_logmatmulexp(result)
            result = result.logsumexp(-1)
            result = self.initial_logits + head + result
            result = result - result.logsumexp(-1, keepdim=True)
        elif len(self.transition_logits.shape) >= 3:
            # Time-inhomogeneous transitions: the final matrix is dropped
            # because no observation follows the last step.
            result = self.transition_logits[..., :-1, :, :] + tail
            result = _sequential_logmatmulexp(result)
            result = result.logsumexp(-1)
            result = self.initial_logits + head + result
            result = result - result.logsumexp(-1, keepdim=True)
        else:
            raise ValueError('Wrong size for transition matrices')
        return result
    def parameters(self):
        """A parameters method to fit into the torch framework.

        :return: A list containing the initial log probs, the log transition
            probs, and the params which describe the observation distribution.
        """
        return \
            [
                self.initial_logits,
                self.transition_logits,
                self.observation_dist._param
            ]
    def _nonevent_output_shape(self, sample_shape=()):
        # Computes ``batch_shape + (time,)`` for a sample, excluding event
        # dims. When the model has no fixed duration, the last element of
        # sample_shape is reinterpreted as the time dimension (default 1).
        duration = self.duration
        if duration is None:
            if sample_shape == ():
                time_shape = (1,)
            else:
                time_shape = sample_shape[-1:]
            shape = sample_shape[:-1] + self.batch_shape + time_shape
        else:
            time_shape = (duration,)
            shape = sample_shape + self.batch_shape + time_shape
        return shape
    def _flatten_batch(self, shape):
        # Collapses all batch dimensions into a single one so sampling can run
        # under a single pyro.plate. Returns the flattened shape, transition
        # probabilities (exponentiated), and observation parameters, both
        # broadcast and reshaped to the flattened batch.
        time_shape = shape[-1:]
        total_batches = reduce(mul, shape[:-1], 1)
        flat_shape = (total_batches,) + time_shape
        tmats = self.transition_logits.exp().expand(
            shape + self.transition_logits.shape[-2:]
        ).reshape(flat_shape + self.transition_logits.shape[-2:])
        b = self.observation_dist.batch_shape
        b_shape = broadcast_shape(shape, b[:-1])
        k = self.observation_dist._param.shape
        # The trailing (-1,)*(len(k)-len(b)) keeps any event dims of the
        # parameter tensor untouched by the expand/reshape.
        flat_params = \
            self.observation_dist._param.expand(
                b_shape + b[-1:] + (-1,)*(len(k)-len(b))
            ).reshape(flat_shape + b[-1:] + (-1,)*(len(k)-len(b)))
        return flat_shape, tmats, flat_params
    def sample(self, sample_shape=()):
        """Sample from the distribution.

        WARNING: This method does not return the correct answer for HMMs with
        heterogeneous outputs.

        :param tuple sample_shape: tuple of ints. If the model doesn't contain a
            time dimension, i.e. if :attr:`transition_logits` has only two
            dimensions, then the last element of :attr:`sample_shape` is taken
            to be the time dimension, and all others will be
            treated independently as a batch.
            So
            ``batch_shape = sample_shape[:-1] + self.batch_shape``,
            ``time_length = sample_shape[-1]``
            If :attr:`sample_shape` is the empty tuple and the model doesn't
            contain a time dimension, we just sample from the initial
            distribution, otherwise all elements of
            :attr:`sample_shape` are interpreted as batch dimensions, and the
            time dimension of the model is always used.
            So
            ``batch_shape = sample_shape + self.batch_shape``,
            ``time_length = self.transition_logits.shape[-3]``
        :returns: ``batch_shape`` number of samples, each of length ``time_dim``
        :raises ValueError: if the model shape does not broadcast to the
            sample shape.
        """
        shape = self._nonevent_output_shape(sample_shape)
        flat_shape, tmats, flat_params = self._flatten_batch(shape)
        total_batches, steps = flat_shape
        # Draw one sample to discover the observation dtype.
        dtype = self.observation_dist.sample().dtype
        states = torch.empty(flat_shape, dtype=int)
        observations = \
            torch.empty(flat_shape + self.observation_dist.event_shape, dtype=dtype)
        with pyro.plate("batches", total_batches) as batch:
            # Initial state emits the first observation (see class docstring).
            states[batch, 0] = pyro.sample("x_{}_0".format(batch),
                                           dist.Categorical(self.initial_logits.exp()),
                                           )
            observations[batch, 0] = pyro.sample(
                "y_{}_0".format(batch),
                type(self.observation_dist)(
                    flat_params[batch, 0, states[batch, 0]]
                ),
            )
            for t in pyro.markov(range(1, steps)):
                states[batch, t] = pyro.sample(
                    "x_{}_{}".format(batch, t),
                    dist.Categorical(tmats[batch, t - 1, states[batch, t - 1]]),
                )
                observations[batch, t] = pyro.sample(
                    "y_{}_{}".format(batch, t),
                    type(self.observation_dist)(
                        flat_params[batch, t, states[batch, t]]
                    ),
                )
        # Unflatten back to the requested batch shape.
        states = states.reshape(shape)
        observations = observations.reshape(shape + self.observation_dist.event_shape)
        return HMMOutput(states, observations)
    def log_prob(self, value):
        """Computes the log likelihood of the given observations.

        :param value: observations to compute the log_prob of.
            shape ``(batch_shape, time_dim)``
        :returns: The log likelihoods of the values.
            shape ``batch_shape``

        This code is based on the code for :py:class:`pyro.distributions.hmm.DiscreteHMM`,
        the license for this is in the ``licenses/HMM_LICENSE.md``.
        """
        value = value.unsqueeze(-1 - self.observation_dist.event_dim).float()
        observation_logits = self.observation_dist.log_prob(value)
        # unsqueeze(-1) attaches each observation to the *source* state of its
        # transition, matching the initial-state-emits convention; the extra
        # transition after the final observation marginalizes to one.
        result = self.transition_logits + observation_logits.unsqueeze(-1)
        result = _sequential_logmatmulexp(result)
        result = self.initial_logits + result.logsumexp(-1)
        result = result.logsumexp(-1)
        return result
class PermutedDiscreteHMM(DiscreteHMM):
    """An HMM that allows for the underlying states to be permuted during a run.
    """
    def __init__(self, initial_logits, transition_logits, observation_dist,
                 validate_args=None):
        """
        :param initial_logits: log of the initial distribution
            shape ``(state_dim,)``
        :param transition_logits: log of the transition probabilities
            shape ``(state_dim, state_dim)``
        :param observation_dist: The output distribution of the HMM. Last
            dimension of its ``batch_shape`` should be of size ``state_dim``
            See :py:class:`~pyro.distributions.DiscreteHMM` for details on
            shape restrictions.
        :raises ValueError: If the :attr:`observation_dist` does not have the
            :meth:`enumerate_support` method.
        """
        if not observation_dist.has_enumerate_support:
            raise ValueError("The observation distribution must have the "
                             ".enumerate_support method.")
        super().__init__(initial_logits, transition_logits, observation_dist,
                         validate_args=validate_args)
    @classmethod
    def from_hmm(cls, hmm: DiscreteHMM):
        # Alternate constructor: wrap an existing DiscreteHMM's parameters.
        return cls(hmm.initial_logits, hmm.transition_logits, hmm.observation_dist)
    def sample(self, sample_shape=(), perm_policy: PermPolicy = None):
        r"""
        This method allows us to sample from the HMM with a given
        ``PermPolicy``.

        :param tuple sample_shape: tuple of ints. If the model doesn't contain a
            time dimension, i.e. if :attr:`transition_logits` has only two
            dimensions, then the last element of :attr:`sample_shape` is taken
            to be the time dimension, and all others will be
            treated independently as a batch.
            So
            ``batch_shape = sample_shape[:-1] + self.batch_shape``,
            ``time_length = sample_shape[-1]``
            If :attr:`sample_shape` is the empty tuple and the model doesn't
            contain a time dimension, we just sample from the initial
            distribution, otherwise all elements of
            :attr:`sample_shape` are interpreted as batch dimensions, and the
            time dimension of the model is always used.
            So
            ``batch_shape = sample_shape + self.batch_shape``,
            ``time_length = self.transition_logits.shape[-3]``
        :param perm_policy: A PermPolicy object, must implement
            .get_perm, which is a method which takes batched data
            of shape ``batch_shape``
            and returns a batched permutation of shape
            ``batch_shape + (num_states,)``.
        :returns: A :py:class:`HMMOutput` object, containing
            `.states`: :py:class:`torch.Tensor`, dtype :py:class:`int`.
            The states realized during the run.
            shape ``batch_shape + (time_dim,)``
            `.observations`: :py:class:`torch.Tensor`,
            dtype :py:class:`float`.
            The output observations.
            shape ``batch_shape + (time_dim,)``
        """
        if perm_policy is None:
            # Without a policy this is an ordinary (unpermuted) HMM sample.
            return super().sample(sample_shape)
        shape = self._nonevent_output_shape(sample_shape)
        flat_shape, tmats, flat_params = self._flatten_batch(shape)
        total_batches, steps = flat_shape
        dtype = self.observation_dist.sample().dtype
        states = torch.empty(flat_shape, dtype=int)
        observations = \
            torch.empty(
                flat_shape + self.observation_dist.event_shape, dtype=dtype
            )
        with pyro.plate("batches", total_batches) as batch:
            states[batch, 0] = pyro.sample(
                "x_{}_0".format(batch),
                dist.Categorical(self.initial_logits.exp().repeat(total_batches, 1)),
            )
            observations[batch, 0] = pyro.sample(
                "y_{}_0".format(batch),
                type(self.observation_dist)(
                    flat_params[batch, 0, states[batch, 0]]
                ),
            )
            for t in pyro.markov(range(1, flat_shape[-1])):
                # Reshape the previous observation to the policy's expected
                # batch shape before querying it for a permutation.
                shaped_o = observations[batch, t-1].reshape(shape[:-1] + self.observation_dist.event_shape)
                perm = perm_policy.get_perm(shaped_o, event_dims=self.observation_dist.event_dim).reshape(total_batches, len(self.initial_logits))
                # Apply the selected permutation to the rows/columns of the
                # transition matrix before drawing the next state.
                states[batch, t] = pyro.sample(
                    "x_{}_{}".format(batch, t),
                    dist.Categorical(
                        tmats[batch, t-1][
                            wrap_index(perm, perm.shape[:-1])
                        ][batch, states[batch, t-1]],
                    ),
                )
                observations[batch, t] = pyro.sample(
                    "y_{}_{}".format(batch, t),
                    type(self.observation_dist)(
                        flat_params[batch, t, states[batch, t]]
                    ),
                )
            # Feed the final observation to the policy as well so its internal
            # history stays aligned with the data length (the returned perm is
            # unused by construction).
            shaped_o = observations[batch, -1].reshape(shape[:-1] + self.observation_dist.event_shape)
            perm = perm_policy.get_perm(shaped_o, event_dims=self.observation_dist.event_dim).reshape(total_batches, len(self.initial_logits))
        states = states.reshape(shape)
        observations = observations.reshape(shape + self.observation_dist.event_shape)
        return HMMOutput(
            states,
            observations,
        )
    def expand_with_perm(self, perm):
        """Expands the model along the time dimension, according to a
        permutation.

        :param perm: The list of permutations to apply. Should be of shape
            ``batch_shape + (num_steps, num_states)``.
        :return: An HMM expanded along the time dimension.
        """
        batch_shape = perm.shape[:-1]
        t_logits = self.transition_logits.expand(
            batch_shape + self.transition_logits.shape[-2:]
        )
        # Index each expanded transition matrix by its permutation, producing
        # a time-inhomogeneous chain of permuted transition matrices.
        t_logits = t_logits[wrap_index(perm, batch_shape=perm.shape[:-1])]
        return type(self)(self.initial_logits, t_logits, self.observation_dist)
    def posterior_log_initial_state_dist(self, data, perm=None):
        """The posterior log initial state distributions for the data, given the
        permutations applied.

        :param torch.Tensor data: Data to compute the posterior initial state
            distribution for
        :param torch.Tensor perm: Permutations that were applied.
        :return:
        """
        if perm is None:
            return super().posterior_log_initial_state_dist(data)
        else:
            batch_shape = perm.shape[:-1]
            if data.shape[:len(data.shape)-self.observation_dist.event_dim] != batch_shape:
                raise ValueError("Perms and data do not have the same batch shape.")
            # Reduce to the unpermuted computation on the expanded model.
            return self.expand_with_perm(perm).posterior_log_initial_state_dist(data)
    def log_prob(self, data, perm=None):
        """
        Computes the log prob of a run, using the permutation sequence
        that was applied to generate the data.

        :param torch.Tensor perm: int.
            The encoded permutations
            applied to the HMM to generate the data.
        :param torch.Tensor data: float.
            A tensor containing the data to compute the log_prob for.
        :returns: float :py:class:`torch.Tensor`.
            The log probability of the data under the model where the
            permutations encoded by perm is applied.
            shape ``perm.shape[:-1]``
        :raises ValueError: if :attr:`perm` and :attr:`data` are not compatible
            shapes.

        .. seealso:: Method
            :py:meth:`~perm_hmm.models.hmms.DiscreteHMM.log_prob`
        """
        if perm is None:
            return super().log_prob(data)
        batch_shape = perm.shape[:-1]
        if data.shape[:len(data.shape)-self.observation_dist.event_dim] != batch_shape:
            raise ValueError("Perms and data do not have the same batch shape.")
        # Reduce to the unpermuted computation on the expanded model.
        return self.expand_with_perm(perm).log_prob(data)
def random_hmm(n):
    """A utility for generating random HMMs.

    Builds a random HMM with Bernoulli outputs: every row of the transition
    matrix is drawn from a Dirichlet distribution with equal concentrations,
    the initial state distribution is uniform, and each state's "bright"
    output probability is drawn uniformly from the unit interval.

    .. seealso:: :py:meth:`~perm_hmm.models.hmms.random_phmm`

    :param int n: Number of states for the HMM
    :return: A DiscreteHMM with Bernoulli outputs.
    """
    row_dist = dist.Dirichlet(torch.ones(n) / n)
    log_initial = (torch.ones(n) / n).log()
    log_transitions = row_dist.sample((n,)).log()
    outputs = dist.Bernoulli(torch.rand(n))
    return DiscreteHMM(log_initial, log_transitions, outputs)
def random_phmm(n):
    """A utility for generating random PermutedDiscreteHMMs.

    Draws a random Bernoulli-output HMM via
    :py:meth:`~perm_hmm.models.hmms.random_hmm` and wraps it as a
    :py:class:`PermutedDiscreteHMM`.

    :param int n: Number of states for the HMM
    :return: A PermutedDiscreteHMM with Bernoulli outputs.
    """
    return PermutedDiscreteHMM.from_hmm(random_hmm(n))
class SkipFirstDiscreteHMM(pyro.distributions.hmm.DiscreteHMM):
    """The initial state does not output.

    Convention: a transition happens *before* each observation, so a run of
    ``steps`` observations involves ``steps + 1`` states.
    """
    def __init__(self, initial_logits, transition_logits, observation_dist,
                 validate_args=None):
        """
        Initializes the HMM. Just passes to the superclass initialization
        method with a check for the presence of an attribute.

        :raises ValueError: If the :attr:`observation_dist` doesn't have a
            :attr:`.param` attribute.
        """
        if not hasattr(observation_dist, '_param'):
            raise ValueError("The observation distribution should have a "
                             "'._param' attribute. Try reencoding your "
                             "distribution as a pyro.distributions.Categorical "
                             "object.")
        super().__init__(initial_logits, transition_logits, observation_dist,
                         validate_args=validate_args)
        # Mirror the observation distribution's enumerability so callers can
        # query it directly on the HMM.
        self.has_enumerate_support = self.observation_dist.has_enumerate_support
    def enumerate_support(self, expand=True):
        # The support of the HMM's outputs is exactly the support of the
        # observation distribution.
        return self.observation_dist.enumerate_support(expand)
    def posterior_log_initial_state_dist(self, value):
        """Computes the posterior log initial state distribution.

        :param torch.Tensor value: The observed data.
            shape ``(batch_shape, time_dim)``
        :returns: The posterior log initial state distribution.
            shape ``(batch_shape, state_dim)``
        :raises ValueError: if the transition matrices are of the wrong size.
        """
        if value.shape[-1] == 0:
            # No data observed: the posterior is just the prior.
            return self.initial_logits
        # Insert a state dimension so the data broadcasts against the
        # per-state parameters of the observation distribution.
        value = value.unsqueeze(-1 - self.observation_dist.event_dim)
        value = value.float()
        ol = self.observation_dist.log_prob(value)
        # unsqueeze(-2) attaches each observation to the *destination* state
        # of its transition: here every observation is preceded by a
        # transition (the initial state itself emits nothing).
        ol = ol.unsqueeze(-2)
        result = self.transition_logits + ol
        result = _sequential_logmatmulexp(result)
        result = result.logsumexp(-1)
        result = self.initial_logits + result
        result = result - result.logsumexp(-1, keepdim=True)
        return result
    def _nonevent_output_shape(self, sample_shape=()):
        # Computes ``batch_shape + (time,)`` for a sample, excluding event
        # dims. When the model has no fixed duration, the last element of
        # sample_shape is reinterpreted as the time dimension (default 1).
        duration = self.duration
        if duration is None:
            if sample_shape == ():
                time_shape = (1,)
            else:
                time_shape = sample_shape[-1:]
            shape = sample_shape[:-1] + self.batch_shape + time_shape
        else:
            time_shape = (duration,)
            shape = sample_shape + self.batch_shape + time_shape
        return shape
    def _flatten_batch(self, shape):
        # Collapses all batch dimensions into a single one so sampling can run
        # under a single pyro.plate. Returns the flattened shape, transition
        # probabilities (exponentiated), and observation parameters, both
        # broadcast and reshaped to the flattened batch.
        time_shape = shape[-1:]
        total_batches = reduce(mul, shape[:-1], 1)
        flat_shape = (total_batches,) + time_shape
        tmats = self.transition_logits.exp().expand(
            shape + self.transition_logits.shape[-2:]
        ).reshape(flat_shape + self.transition_logits.shape[-2:])
        b = self.observation_dist.batch_shape
        b_shape = broadcast_shape(shape, b[:-1])
        k = self.observation_dist._param.shape
        # The trailing (-1,)*(len(k)-len(b)) keeps any event dims of the
        # parameter tensor untouched by the expand/reshape.
        flat_params = \
            self.observation_dist._param.expand(
                b_shape + b[-1:] + (-1,)*(len(k)-len(b))
            ).reshape(flat_shape + b[-1:] + (-1,)*(len(k)-len(b)))
        return flat_shape, tmats, flat_params
    def sample(self, sample_shape=()):
        """
        Sample from the distribution.

        :param tuple sample_shape: tuple of ints. If the model doesn't contain a
            time dimension, i.e. if :attr:`transition_logits` has only two
            dimensions, then the last element of :attr:`sample_shape` is taken
            to be the time dimension, and all others will be
            treated independently as a batch.
            So
            ``batch_shape = sample_shape[:-1] + self.batch_shape``,
            ``time_length = sample_shape[-1]``
            If :attr:`sample_shape` is the empty tuple and the model doesn't
            contain a time dimension, we just sample from the initial
            distribution, otherwise all elements of
            :attr:`sample_shape` are interpreted as batch dimensions, and the
            time dimension of the model is always used.
            So
            ``batch_shape = sample_shape + self.batch_shape``,
            ``time_length = self.transition_logits.shape[-3]``
        :returns: ``batch_shape`` number of samples, each of length ``time_dim``
        :raises ValueError: if the model shape does not broadcast to the
            sample shape.
        """
        shape = self._nonevent_output_shape(sample_shape)
        flat_shape, tmats, flat_params = self._flatten_batch(shape)
        total_batches, steps = flat_shape
        # Draw one sample to discover the observation dtype.
        dtype = self.observation_dist.sample().dtype
        # One more state than observations: the initial state emits nothing.
        states = torch.empty(flat_shape[:-1] + (steps + 1,), dtype=int)
        observations = \
            torch.empty(flat_shape + self.observation_dist.event_shape, dtype=dtype)
        with pyro.plate("batches", total_batches) as batch:
            states[batch, 0] = pyro.sample("x_{}_0".format(batch),
                                           dist.Categorical(self.initial_logits.exp()),
                                           )
            for t in pyro.markov(range(1, steps+1)):
                # Transition first, then emit from the new state.
                states[batch, t] = pyro.sample(
                    "x_{}_{}".format(batch, t),
                    dist.Categorical(tmats[batch, t - 1, states[batch, t - 1]]),
                )
                observations[batch, t-1] = pyro.sample(
                    "y_{}_{}".format(batch, t-1),
                    type(self.observation_dist)(
                        flat_params[batch, t-1, states[batch, t]]
                    ),
                )
        # Unflatten back to the requested batch shape.
        states = states.reshape(shape[:-1] + (steps+1,))
        observations = observations.reshape(shape + self.observation_dist.event_shape)
        return HMMOutput(states, observations)
class SkipFirstPermutedDiscreteHMM(SkipFirstDiscreteHMM):
    """Allows for the underlying states to be permuted during a run.

    .. seealso:: :py:class:`~perm_hmm.models.hmms.PermutedDiscreteHMM`
    """
    def __init__(self, initial_logits, transition_logits, observation_dist,
                 validate_args=None):
        """
        :param initial_logits: log of the initial distribution
            shape ``(state_dim,)``
        :param transition_logits: log of the transition probabilities
            shape ``(state_dim, state_dim)``
        :param observation_dist: The output distribution of the HMM. Last
            dimension of its ``batch_shape`` should be of size ``state_dim``
            See :py:class:`pyro.distributions.DiscreteHMM` for details on
            shape restrictions.
        :raises ValueError: If the :attr:`observation_dist` does not have the
            :meth:`enumerate_support` method.
        """
        if not observation_dist.has_enumerate_support:
            raise ValueError("The observation distribution must have the "
                             ".enumerate_support method.")
        super().__init__(initial_logits, transition_logits, observation_dist,
                         validate_args=validate_args)
    @classmethod
    def from_hmm(cls, hmm: SkipFirstDiscreteHMM):
        # Alternate constructor: wrap an existing SkipFirstDiscreteHMM's
        # parameters.
        return cls(hmm.initial_logits, hmm.transition_logits, hmm.observation_dist)
    def sample(self, sample_shape=(), perm_policy: PermPolicy = None):
        r"""Samples from the distribution.

        Samples are generated using the ``perm_policy`` to select permutations
        of the underlying states at each step.
        The initial state does not have an output in this distribution.

        :param tuple sample_shape: tuple of ints. If the model doesn't contain a
            time dimension, i.e. if :attr:`transition_logits` has only two
            dimensions, then the last element of :attr:`sample_shape` is taken
            to be the time dimension, and all others will be
            treated independently as a batch.
            So
            ``batch_shape = sample_shape[:-1] + self.batch_shape``,
            ``time_length = sample_shape[-1]``
            If :attr:`sample_shape` is the empty tuple and the model doesn't
            contain a time dimension, we just sample from the initial
            distribution, otherwise all elements of
            :attr:`sample_shape` are interpreted as batch dimensions, and the
            time dimension of the model is always used.
            So
            ``batch_shape = sample_shape + self.batch_shape``,
            ``time_length = self.transition_logits.shape[-3]``
        :param perm_policy: A PermPolicy object, must implement
            .get_perm, which is a method which takes batched data
            of shape ``batch_shape``
            and returns a batched permutation of shape
            ``batch_shape + (num_states,)``.
        :returns: A :py:class:`HMMOutput` object, containing
            `.states`: :py:class:`torch.Tensor`, dtype :py:class:`int`.
            The states realized during the run.
            shape ``batch_shape + (time_dim,)``
            `.observations`: :py:class:`torch.Tensor`,
            dtype :py:class:`float`.
            The output observations.
            shape ``batch_shape + (time_dim,)``
        """
        if perm_policy is None:
            # Without a policy this is an ordinary (unpermuted) sample.
            return super().sample(sample_shape)
        shape = self._nonevent_output_shape(sample_shape)
        flat_shape, tmats, flat_params = self._flatten_batch(shape)
        total_batches, steps = flat_shape
        dtype = self.observation_dist.sample().dtype
        # One more state than observations: the initial state emits nothing.
        states = torch.empty(flat_shape[:-1] + (flat_shape[-1] + 1,), dtype=int)
        observations = \
            torch.empty(
                flat_shape + self.observation_dist.event_shape, dtype=dtype
            )
        with pyro.plate("batches", total_batches) as batch:
            states[batch, 0] = pyro.sample(
                "x_{}_0".format(batch),
                dist.Categorical(self.initial_logits.exp().repeat(total_batches, 1)),
            )
            # Before any data is seen the permutation is the identity.
            perm = torch.arange(len(self.initial_logits)).expand(
                total_batches,
                -1,
            )
            for t in pyro.markov(range(1, flat_shape[-1]+1)):
                # Transition through the permuted matrix, then emit from the
                # new state.
                states[batch, t] = pyro.sample(
                    "x_{}_{}".format(batch, t),
                    dist.Categorical(
                        tmats[batch, t-1][
                            wrap_index(perm, perm.shape[:-1])
                        ][batch, states[batch, t-1]],
                    ),
                )
                observations[batch, t-1] = pyro.sample(
                    "y_{}_{}".format(batch, t-1),
                    type(self.observation_dist)(
                        flat_params[batch, t-1, states[batch, t]]
                    ),
                )
                shaped_o = observations[batch, t-1].reshape(
                    shape[:-1] + self.observation_dist.event_shape
                )  # Shape the observation before passing to perm, so that the
                # perms have the right shape in the perm_history later.
                perm = perm_policy.get_perm(
                    shaped_o,
                    event_dims=self.observation_dist.event_dim
                ).reshape(total_batches, len(self.initial_logits))
        # Unflatten back to the requested batch shape.
        states = states.reshape(shape[:-1] + (shape[-1]+1,))
        observations = observations.reshape(shape + self.observation_dist.event_shape)
        return HMMOutput(
            states,
            observations,
        )
    def expand_with_perm(self, perm):
        """Expands the model along the time dimension, according to a
        permutation.

        :param perm: The permutations applied, shape
            ``batch_shape + (num_steps, num_states)``.
        :return: An HMM expanded along the time dimension.
        """
        # HACK: We use the convention that the last permutation acts after the last
        # data, so it's irrelevant. Therefore, throw out the last permutation.
        # On the other hand, the initial permutation is always the identity, so
        # attach that.
        perm = perm[..., :-1, :]
        batch_shape = perm.shape[:-1]
        num_states = perm.shape[-1]
        iden = torch.arange(num_states).expand(perm.shape[:-2] + (1, num_states))
        perm = torch.cat((iden, perm), dim=-2)
        # The prepended identity lengthens the time dimension by one.
        batch_shape = batch_shape[:-1] + (batch_shape[-1] + 1,)
        t_logits = self.transition_logits.expand(
            batch_shape + self.transition_logits.shape[-2:]
        )
        # Index each expanded transition matrix by its permutation, producing
        # a time-inhomogeneous chain of permuted transition matrices.
        t_logits = t_logits[wrap_index(perm, batch_shape=perm.shape[:-1])]
        return type(self)(self.initial_logits, t_logits, self.observation_dist)
    def posterior_log_initial_state_dist(self, data, perm=None):
        """The posterior log initial state distributions for the data, given the
        permutations applied.

        :param torch.Tensor data: Data to compute the posterior initial state
            distribution for
        :param torch.Tensor perm: Permutations that were applied.
        :return:
        """
        if perm is None:
            return super().posterior_log_initial_state_dist(data)
        else:
            batch_shape = perm.shape[:-1]
            if data.shape[:len(data.shape)-self.observation_dist.event_dim] != batch_shape:
                raise ValueError("Perms and data do not have the same batch shape.")
            # Reduce to the unpermuted computation on the expanded model.
            return self.expand_with_perm(perm).posterior_log_initial_state_dist(data)
    def log_prob(self, data, perm=None):
        """Computes the log prob of a run, using the permutation sequence
        that was applied to generate the data.

        :param torch.Tensor perm: int.
            The encoded permutations
            applied to the HMM to generate the data.
        :param torch.Tensor data: float.
            A tensor containing the data to compute the log_prob for.
        :returns: float :py:class:`torch.Tensor`.
            The log probability of the data under the model where the
            permutations encoded by perm is applied.
            shape ``perm.shape[:-1]``
        :raises ValueError: if :attr:`perm` and :attr:`data` are not compatible
            shapes.

        .. seealso:: Method
            :py:meth:`perm_hmm.models.hmms.DiscreteHMM.log_prob`
        """
        if perm is None:
            return super().log_prob(data)
        batch_shape = perm.shape[:-1]
        if data.shape[:len(data.shape)-self.observation_dist.event_dim] != batch_shape:
            raise ValueError("Perms and data do not have the same batch shape.")
        # Reduce to the unpermuted computation on the expanded model.
        return self.expand_with_perm(perm).log_prob(data)
class ExpandedHMM(SkipFirstPermutedDiscreteHMM):
    r"""
    HMM with outcomes :math:`\mathcal{Y}`, and state space
    :math:`\mathcal{S} \times \mathcal{Y}`, where :math:`\mathcal{S}` is the
    physical state space.

    The serial index of a pair :math:`(l, o)` is ``l * |Y| + o``, i.e. the
    outcome index varies fastest.
    """

    def lo_to_i(self, lo):
        r"""Get serial index from tuple index.

        :param tuple lo: 2-tuple, a pair of :math:`(l, o) \in \mathcal{S}
            \times \mathcal{Y}`
        :return: Serial index :math:`i`
        """
        num_outcomes = self.observation_dist.enumerate_support().shape[0]
        state, outcome = lo
        return state * num_outcomes + outcome

    def i_to_lo(self, i):
        r"""Get tuple index from serial

        :param int i: Serial index.
        :return: 2-tuple, a pair of :math:`(l, o) \in \mathcal{S} \times
            \mathcal{Y}`
        """
        num_outcomes = self.observation_dist.enumerate_support().shape[0]
        return divmod(i, num_outcomes)
| 34,164 | 41.23115 | 146 | py |
perm_hmm | perm_hmm-master/perm_hmm/analysis/policy_viz.py | """Tools for visualizing permutation policies.
"""
import os
import argparse
from copy import deepcopy
import torch
import anytree as at
from anytree.exporter import UniqueDotExporter
from perm_hmm.util import id_and_transpositions
from perm_hmm.policies.policy import PermPolicy
from perm_hmm.policies.min_tree import MinEntPolicy
from example_systems.three_states import three_state_hmm
from perm_hmm.analysis.graph_utils import uniform_tree
def list_tree_to_anytree(list_tree, possible_perms):
    """Convert a list-of-tensors policy tree into an anytree decision tree.

    Builds a uniform tree with ``len(list_tree) - 1`` levels of branching and
    one child per outcome, then attaches to every non-root node the
    permutation that ``list_tree`` assigns to the outcome sequence spelled
    out by that node's path from the root.

    :param list_tree: Sequence of tensors; the entry at depth ``d`` is
        indexed by a length ``d + 1`` tuple of outcomes and holds an index
        into ``possible_perms``.
    :param possible_perms: Tensor of candidate permutations.
    :return: The anytree root, with ``node.data = {'perm': ...}`` set on
        every non-root node.
    """
    branching = list_tree[0].shape[0]
    root = uniform_tree(len(list_tree) - 1, branching)
    for node in at.LevelOrderIter(root):
        if not node.is_root:
            outcome_path = tuple(ancestor.name for ancestor in node.path[1:])
            perm_index = list_tree[node.depth - 1][outcome_path]
            node.data = {'perm': possible_perms[perm_index]}
    return root
def attach_perms_and_policies_to_tree(tree, policy: PermPolicy):
    """
    At each node of a data tree, attaches the permutation selected there and
    a copy of the policy as evolved by the outcomes leading to that node.

    :param tree: An AnyTree representing the data.
    :param policy: A PermPolicy object.
    :return: The same tree, with ``node.data`` holding the evolved policy
        (and, off the root, the permutation it chose).
    """
    root_policy = deepcopy(policy)
    root_policy.reset()
    tree.root.data = {'policy': root_policy}
    for node in at.LevelOrderIter(tree):
        if node.is_root:
            continue
        # Each child evolves its own copy of the parent's policy by one outcome.
        branch_policy = deepcopy(node.parent.data['policy'])
        outcome = torch.tensor([node.name])
        chosen_perm = branch_policy.get_perm(outcome)
        node.data = {'policy': branch_policy, 'perm': chosen_perm}
    return tree
def remove_policies(tree):
    """
    Strip the ``'policy'`` entry from every node's data dictionary.

    :param tree: An AnyTree whose nodes carry ``data`` dicts with a
        ``'policy'`` key.
    :return: The same tree, with the policies removed.
    """
    for current in at.PreOrderIter(tree):
        del current.data['policy']
    return tree
def attach_perms_to_tree(tree, policy: PermPolicy):
    """
    Attach to each node the permutation the policy would select there,
    without retaining the intermediate policy copies.

    :param tree: An AnyTree representing the data.
    :param policy: A PermPolicy object.
    :return: The same tree with only permutations attached.
    """
    annotated = attach_perms_and_policies_to_tree(tree, policy)
    return remove_policies(annotated)
def make_full_decision_tree(policy, num_steps):
    """Build the complete depth-``num_steps`` decision tree for ``policy``.

    :param policy: A PermPolicy; its ``hmm`` attribute supplies the outcome
        space via the observation distribution.
    :param int num_steps: Depth of the tree, i.e. number of observations.
    :return: An AnyTree with the chosen permutation attached to each node.
    """
    # The outcome space belongs to the HMM's observation distribution:
    # enumerate_support is called on observation_dist everywhere else in this
    # codebase, and the HMM itself does not expose it.
    num_outcomes = policy.hmm.observation_dist.enumerate_support().shape[0]
    tree = uniform_tree(num_steps, num_outcomes)
    tree = attach_perms_to_tree(tree, policy)
    return tree
def main(minus_log_a=3, minus_log_b=3, num_steps=3, output_file=None, save_graph=False):
    """Build the decision tree for a three-state HMM and export it as a DOT file.

    :param int minus_log_a: Negative log of the HMM parameter ``a``.
    :param int minus_log_b: Negative log of the HMM parameter ``b``.
    :param int num_steps: Depth of the decision tree (number of observations).
    :param output_file: Name of the DOT file to write; defaults to
        ``'policy_viz.dot'``.
    :param bool save_graph: If True, write next to the current working
        directory; otherwise write under ``../../example_scripts/graphs``
        relative to ``output_file``'s directory.
    """
    if output_file is None:
        output_file = 'policy_viz.dot'
    if save_graph:
        # Anchor the output at the current working directory.  (The original
        # computed this join but discarded the result, leaving the path
        # relative; joining here makes the intent explicit without changing
        # where the file lands.)
        output_file = os.path.join(os.getcwd(), output_file)
    else:
        directory = os.path.join(os.path.dirname(output_file),
                                 '../../example_scripts/graphs')
        os.makedirs(directory, exist_ok=True)
        output_file = os.path.join(directory, output_file)
    hmm = three_state_hmm(-minus_log_a, -minus_log_b)
    possible_perms = id_and_transpositions(hmm.initial_logits.shape[0])
    policy = MinEntPolicy(possible_perms, hmm)
    tree = make_full_decision_tree(policy, num_steps)

    def nodeattrfunc(node):
        # Label each node with the permutation chosen upon reaching it.
        if node.is_root:
            return '"label"="root"'
        return '"label"="{}"'.format(str(list(node.data['perm'][0].numpy())))

    def edgeattrfunc(node, child):
        # Label each edge with the outcome that leads to the child.
        return '"label"="{}"'.format(child.name)

    exporter = UniqueDotExporter(tree.root, edgeattrfunc=edgeattrfunc,
                                 nodeattrfunc=nodeattrfunc)
    exporter.to_dotfile(output_file)
if __name__ == '__main__':
    # Command-line front end: every flag mirrors a parameter of main().
    parser = argparse.ArgumentParser()
    parser.add_argument('--minus_log_a', type=int, default=3)
    parser.add_argument('--minus_log_b', type=int, default=3)
    parser.add_argument('--num_steps', type=int, default=3)
    parser.add_argument('--output_file', type=str, default=None)
    parser.add_argument('--save_graph', action='store_true', default=False)
    args = parser.parse_args()
    main(
        args.minus_log_a,
        args.minus_log_b,
        args.num_steps,
        args.output_file,
        args.save_graph,
    )
| 4,371 | 33.425197 | 154 | py |
perm_hmm | perm_hmm-master/perm_hmm/classifiers/generic_classifiers.py | class Classifier(object):
"""
A generic classifier, has only the classify method.
"""
def classify(self, data, verbosity=0):
"""Performs classification
:param torch.Tensor data: Data to classify. Arbitrary shape.
:param verbosity: Flag to return ancillary data generated in the computation.
:return: If verbosity = 0, return just the classifications.
Otherwise, return a tuple of length two. The first entry is the
classifications, while the second is a dict.
:raises NotImplementedError: If this method is not implemented.
"""
raise NotImplementedError
class MAPClassifier(Classifier):
    """The `maximum a posteriori`_ classifier.

    Requires a model implementing ``posterior_log_initial_state_dist``.

    .. _`maximum a posteriori`: https://en.wikipedia.org/wiki/Maximum_a_posteriori_estimation

    Instances of this class have the following attributes:

    ``model``:
        A model that implements the method ``posterior_log_initial_state_dist``.
    """

    def __init__(self, model):
        """Store the model used to compute posteriors."""
        super().__init__()
        self.model = model

    def classify(self, data, verbosity=0):
        """Classify by the argmax of the posterior initial-state distribution.

        :param torch.Tensor data: Last dimension should be time.
        :param verbosity: When nonzero, also return the posterior log initial
            state distribution used in the computation.
        :return: Classifications with shape ``data.shape[:-1]``; when
            ``verbosity`` is nonzero, a 2-tuple whose second element is a dict
            with key ``b"posterior_log_initial_state_dist"``.
        """
        log_posterior = self.model.posterior_log_initial_state_dist(data)
        guesses = log_posterior.argmax(-1)
        if verbosity:
            return guesses, {b"posterior_log_initial_state_dist": log_posterior}
        return guesses
| 2,015 | 37.769231 | 93 | py |
perm_hmm | perm_hmm-master/perm_hmm/classifiers/perm_classifier.py | from perm_hmm.classifiers.generic_classifiers import MAPClassifier
class PermClassifier(MAPClassifier):
    """
    MAP classifier for an HMM with permutations.
    """

    def classify(self, data, perms=None, verbosity=0):
        """Classify data, optionally under a sequence of applied permutations.

        With ``perms`` given, this is equivalent to
        ``MAPClassifier(self.model.expand_with_perm(perms)).classify(data, verbosity)``;
        without it, plain MAP classification is used.

        :param torch.Tensor data: To be classified. Last dimension interpreted as time.
        :param perms: Permutations. Should have shape == data.shape + (num_states,)
        :param verbosity: If nonzero, returns a tuple with second element a dict
            containing key b"posterior_log_initial_state_dist".
        :return: Classifications, and if verbosity is nonzero, a dict as well.
        """
        if perms is None:
            return super().classify(data, verbosity=verbosity)
        permuted_model = self.model.expand_with_perm(perms)
        return MAPClassifier(permuted_model).classify(data, verbosity)
| 979 | 38.2 | 96 | py |
perm_hmm | perm_hmm-master/perm_hmm/classifiers/interrupted.py | """
This module defines the interrupted classification scheme.
Using an iid model, we can make an inference based on data
which "collects enough evidence".
"""
import torch
from perm_hmm.util import first_nonzero, indices
from perm_hmm.classifiers.generic_classifiers import Classifier
class IIDInterruptedClassifier(Classifier):
    r"""
    A classifier that will terminate before the end of a run if a likelihood ratio exceeds a threshold.
    The classifier is initialized with a distribution which is a model of the distribution which generated the data :math:`p_i`,
    the states for which we are performing a multivalued hypothesis test for :math:`\mathcal{S}`,
    and possibly a log ratio :math:`R` which dictates whether the classifier terminates the run and concludes.
    """
    def __init__(self, dist, ratio, testing_states=None):
        """Initialize the classifier.

        :param dist: Distribution modeling the data; ``dist.batch_shape[-1]``
            is the number of candidate states.
        :param ratio: Threshold log likelihood ratio(s); may carry a batch
            shape of its own (broadcast against the data in ``classify``).
        :param testing_states: States under test; defaults to all states
            modeled by ``dist``.
        """
        super().__init__()
        self.dist = dist
        """
        Distribution used to compute probabilities.
        Should have .batch_shape[-1] == num_states
        """
        self.ratio = ratio
        """
        Threshold likelihood ratio.
        """
        if testing_states is not None:
            self.testing_states = testing_states
        else:
            # Default: test every state the distribution models.
            self.testing_states = torch.arange(self.dist.batch_shape[-1])
    def classify(self, data, verbosity=0):
        r"""
        Classifies data.
        At each time :math:`t`, denote by :math:`q^{(t)}_j(y^t)` the sorted likelihoods :math:`r_i(y^t) = \prod_{s=0}^tr_i(y_s)`,
        so that :math:`q^{(t)}_0(y^t) > \ldots > q^{(t)}_n(y^t)`. Then in particular :math:`q^{(t)}_0(y^t)` is the maximum likelihood of the data
        under the model. Then we compute
        .. math::
            \lambda_t = \log\Bigg(\frac{q^{(t)}_0(y^t)}{q^{(t)}_1(y^t)}\Bigg)
        If at any point we have :math:`\lambda_t > R`, we terminate the run and make the inference
        .. math::
            \hat{s} = \mathcal{S}(\operatorname{argmax}_{i} r_i(y^t))
        :param torch.Tensor data: Last dimension interpreted as time dimension.
        :param verbosity: If true, then return final log likelihood ratios and
            the break_flag, indicating whether the inference concluded
            before reaching the end of the time series.
        :return: If verbosity == 0, just the classifications, otherwise
            a tuple with the second entry a dict, containing
            b"break_flag": (Boolean tensor with shape == classifications.shape)
                indicates if classification was performed before the end
                of the time series
            b"log_like_ratio": (float tensor with shape == classifications.shape)
                Final log likelihood ratio of the most likely to the second
                most likely.
            b"sort_inds": (int tensor with shape == data.shape + (state_dim,))
                Indices returned by torch.sort(intermediate_lps, -1),
                Indicates order of likelihoods of states at that step.
            b"first_breaks": (int tensor with shape == classifications.shape).
                First indices where the likelihood exceeds the threshold,
                for each run.
        """
        shape = data.shape
        # Promote scalars and 1-d series to a (batch, time) layout so the
        # cumulative sums and sorts below apply uniformly.
        if shape == ():
            data = data.expand(1, 1)
        elif len(shape) == 1:
            data = data.expand(1, -1)
        data = data.float()
        # Running (cumulative over time) log-likelihood of each candidate state.
        intermediate_lps = self.dist.log_prob(data.unsqueeze(-1)).cumsum(dim=-2).float()
        # Sort per-state likelihoods ascending; at each step the last two
        # entries are the largest and second largest.
        sort_lps, sort_inds = torch.sort(intermediate_lps, -1)
        # Log ratio of the most likely to the second most likely state.
        sort_lrs = sort_lps[..., -1] - sort_lps[..., -2]
        # Broadcast-compare against each threshold in self.ratio; True marks
        # time steps at which the evidence suffices to stop the run.
        breaks = sort_lrs.view((1,)*len(self.ratio.shape) + sort_lrs.shape) > self.ratio.view(self.ratio.shape + (1,)*len(sort_lrs.shape))
        first_breaks = first_nonzero(breaks, -1)
        ix = indices(first_breaks.shape)
        _, sort_inds = torch.broadcast_tensors(breaks.unsqueeze(-1), sort_inds)
        # Default inference: the most likely state at the final time step.
        classifications = sort_inds[..., -1, -1]
        # Runs that crossed the threshold instead use the most likely state
        # at the step where the threshold was first exceeded.
        mask = first_breaks < breaks.shape[-1]
        fb = first_breaks.clone().detach()
        fb[~mask] = -1
        classifications[mask] = sort_inds[ix + (fb, torch.full_like(fb, -1, dtype=int))][mask]
        # Map argmax positions back to the labels of the states under test.
        classifications = self.testing_states[classifications]
        if not verbosity:
            return classifications
        else:
            return classifications, {
                b"break_flag": breaks.any(-1),
                b"log_like_ratio": sort_lrs[..., -1],
                b"sort_inds": sort_inds,
                b"first_breaks": first_breaks,
            }
class IIDBinaryIntClassifier(Classifier):
    r"""
    Performs a classification between two states based on likelihood ratio tests,
    concluding if there is enough evidence.
    Distinct from :py:class:`IIDInterruptedClassifier` because there are two
    parameters to decide whether to interrupt the run.
    .. seealso:: :py:class:`IIDInterruptedClassifier`
    """
    def __init__(self, bright_model, dark_model, bright_ratio, dark_ratio, bright_state=None, dark_state=None):
        """Initialize with per-hypothesis models and break thresholds.

        :param bright_model: Distribution of outcomes under the bright hypothesis.
        :param dark_model: Distribution of outcomes under the dark hypothesis.
        :param bright_ratio: Log-ratio threshold for concluding "bright".
        :param dark_ratio: Log-ratio threshold for concluding "dark".
        :param bright_state: Label to report for "bright" (optional).
        :param dark_state: Label to report for "dark" (optional).
        """
        super().__init__()
        self.bright_model = bright_model
        self.dark_model = dark_model
        self.bright_ratio = bright_ratio
        r"""
        Torch float. Parameter such that if :math:`\log(L_{bright}/L_{dark})` exceeds
        it, the classifier concludes there is enough evidence to terminate and
        classify as bright
        """
        self.dark_ratio = dark_ratio
        r"""
        Torch float. Parameter such that if :math:`\log(L_{dark}/L_{bright})` exceeds
        it, the classifier concludes there is enough evidence to terminate and
        classify as dark
        """
        # Labels are only substituted when BOTH state labels were supplied.
        if (bright_state is not None) and (dark_state is not None):
            self.testing_states = torch.tensor([dark_state, bright_state])
        else:
            self.testing_states = None
    def classify(self, data, verbosity=0):
        r"""
        Performs classification.
        At each time :math:`t`, compute :math:`\lambda = \log(L_{bright}(y^t)/L_{dark}(y^t))`,
        and conclude bright if :math:`\lambda` > ``self.bright_ratio``, and conclude dark if
        :math:`-\lambda` > ``self.dark_ratio``.
        :param data: Last dimension is interpreted as time.
        :param verbosity: Flag to indicate whether to return ancillary computations.
        :return: If verbosity == 0, returns classifications. Else, returns a tuple with
            first element the classifications and the second a dict containing
            b"break_flag": bool tensor, shape == classifications.shape
                Indicates if the classification concluded before reaching the
                end of the time series
            b"log_like_ratio" float tensor, shape == classifications.shape
                final :math:`\log(L_{bright}(y^t)/L_{dark}(y^t))`.
            b"first_break_bright": int tensor, shape == classifications.shape
                The first indices where the likelihood ratio exceeds the
                threshold ``self.bright_ratio``.
            b"first_break_dark": int tensor, shape == classifications.shape
                The first indices where the likelihood ratio exceeds the
                threshold ``self.dark_ratio``.
        """
        shape = data.shape
        # Promote scalars and 1-d series to a (batch, time) layout.
        if shape == ():
            data = data.expand(1, 1)
        elif len(shape) == 1:
            data = data.expand(1, -1)
        data = data.float()
        # Running log-likelihood of the data under each hypothesis.
        intermediate_bright_lp = self.bright_model.log_prob(data).cumsum(dim=-1).float()
        intermediate_dark_lp = self.dark_model.log_prob(data).cumsum(dim=-1).float()
        # Running log likelihood ratio, bright over dark, at each time step.
        intermediate_lr = intermediate_bright_lp - intermediate_dark_lp
        bright_most_likely = intermediate_lr[..., -1] > 0
        # Time steps at which each hypothesis's threshold is crossed.
        break_bright = intermediate_lr > self.bright_ratio
        break_dark = -intermediate_lr > self.dark_ratio
        first_break_bright = first_nonzero(break_bright, -1)
        first_break_dark = first_nonzero(break_dark, -1)
        bright_first = first_break_bright < first_break_dark
        bright_break_flag = break_bright.any(dim=-1)
        dark_break_flag = break_dark.any(dim=-1)
        break_flag = bright_break_flag | dark_break_flag
        neither_break = ~break_flag
        both_break = (bright_break_flag & dark_break_flag)
        one_break = bright_break_flag.logical_xor(dark_break_flag)
        # Classify bright if (a) only the bright threshold was crossed, or
        # (b) both were crossed but bright was crossed first, or
        # (c) neither was crossed and bright is the final MAP choice.
        classified_bright = \
            (one_break & bright_break_flag) | \
            (both_break & (bright_first & bright_break_flag)) | \
            (neither_break & bright_most_likely)
        classified_bright = classified_bright.long()
        # Translate the 0/1 decision into the caller-supplied state labels,
        # when they were given at construction time.
        if self.testing_states is not None:
            classifications = self.testing_states[classified_bright]
        else:
            classifications = classified_bright
        if not verbosity:
            return classifications
        else:
            return classifications, {
                b"break_flag": break_flag,
                b"log_like_ratio": intermediate_lr[..., -1],
                b"first_break_bright": first_break_bright,
                b"first_break_dark": first_break_dark,
            }
| 9,158 | 41.207373 | 145 | py |
perm_hmm | perm_hmm-master/tests/sample_min_entropy_test.py | import unittest
from perm_hmm.models.hmms import PermutedDiscreteHMM
import torch
import pyro
import pyro.distributions as dist
from perm_hmm.util import ZERO
from perm_hmm.policies.min_tree import MinEntPolicy
class MyTestCase(unittest.TestCase):
    """Tests for sampling from a PermutedDiscreteHMM under a MinEntPolicy."""
    def setUp(self):
        # Two-state HMM: state 0 emits 0/1 uniformly, state 1 always emits 1.
        self.num_states = 2
        self.observation_probs = torch.tensor([.5, 1])
        self.observation_constructor = pyro.distributions.Bernoulli
        self.observation_dist = \
            self.observation_constructor(self.observation_probs)
        self.n_outcomes = 2
        self.possible_outputs = torch.arange(2, dtype=torch.float).unsqueeze(-1)
        # Identity and swap: the only permutations of two states.
        self.possible_perms = torch.tensor([[0, 1],
                                            [1, 0]], dtype=int)
        self.num_perms = len(self.possible_perms)
        # State 0 is (up to ZERO) absorbing; state 1 mixes uniformly.
        self.transition_logits = torch.tensor([[1-ZERO, ZERO], [.5, .5]]).log().float()
        self.initial_logits = torch.tensor([.5, .5]).log()
        self.bdhmm = PermutedDiscreteHMM(self.initial_logits,
                                         self.transition_logits,
                                         self.observation_dist)
        # Second HMM whose observation distribution carries an extra batch dim.
        self.deep_hmm = PermutedDiscreteHMM(
            self.initial_logits,
            self.transition_logits,
            dist.Bernoulli(torch.rand(2, 2))
        )
        self.deep_perm_policy = MinEntPolicy(self.possible_perms, self.deep_hmm, save_history=True)
        self.perm_policy = MinEntPolicy(self.possible_perms, self.bdhmm, save_history=True)
    def test_sample_minent(self):
        """Replaying sampled data through the policy must reproduce the
        permutations and posterior distributions recorded while sampling."""
        x, y = self.bdhmm.sample((10, 7), self.perm_policy)
        perm_array = self.perm_policy.perm_history
        hist = self.perm_policy.calc_history
        dist_array = hist[b"penultimates"].logsumexp(-1)
        entropy_array = hist[b"log_costs"]
        self.assertTrue(x.shape == (10, 7))
        self.assertTrue(y.shape == (10, 7))
        self.assertTrue(perm_array.shape == (10, 7, 2))
        self.assertTrue(dist_array.shape == (10, 7, 2))
        # self.assertTrue(entropy_array[-2].shape == (10, 7))
        # Replay the same observations from a fresh policy state.
        self.perm_policy.reset(save_history=True)
        b_perm_array = self.perm_policy.get_perms(y)
        b_hist = self.perm_policy.calc_history
        b_dist_array = b_hist[b"penultimates"].logsumexp(-1)
        # b_entropy_array = b_hist[b"entropy"]
        self.assertTrue(b_dist_array.exp().allclose(dist_array.exp(), atol=1e-6))
        self.assertTrue(torch.all(b_perm_array == perm_array))
        # self.assertTrue(b_entropy_array.allclose(entropy_array, atol=1e-7))
    def test_shapes(self):
        """Sampled states, observations, and policy histories must have shapes
        consistent with the requested batch shape, for a range of shapes."""
        shapes = [(10, 7), (), (1,), (10, 1), (1, 1), (10,)]
        # shallow = [(self.perm_policy, self.bdhmm), (self.deep_perm_policy, self.deep_hmm)]
        shallow = [(self.perm_policy, self.bdhmm)]
        for shape in shapes:
            for typ in shallow:
                ps, hmm = typ
                # Sampling without a policy.
                with self.subTest(shape=shape, hmm=hmm):
                    x, y = hmm.sample(shape)
                    t_shape = shape
                    if hmm == self.deep_hmm:
                        t_shape = t_shape + self.deep_hmm.observation_dist.batch_shape[:-1]
                    else:
                        # A scalar request still yields one time step.
                        if t_shape == ():
                            t_shape = (1,)
                    self.assertTrue(x.shape == t_shape)
                    self.assertTrue(
                        y.shape == t_shape + hmm.observation_dist.event_shape)
                # Sampling with the policy, checking history shapes too.
                with self.subTest(shape=shape, ps=ps, hmm=hmm):
                    ps.reset(save_history=True)
                    x, y = hmm.sample(shape, ps)
                    print("input shape", shape)
                    perm_array = ps.perm_history
                    hist = ps.calc_history
                    dist_array = hist[b"penultimates"].logsumexp(-1)
                    # entropy_array = hist[b"entropy"]
                    t_shape = shape
                    s_shape = shape
                    if hmm == self.deep_hmm:
                        t_shape = t_shape + self.deep_hmm.observation_dist.batch_shape[:-1]
                        s_shape = s_shape + self.deep_hmm.observation_dist.batch_shape[:-1]
                    else:
                        if t_shape == ():
                            t_shape = (1,)
                            s_shape = ()
                    self.assertTrue(x.shape == t_shape)
                    self.assertTrue(y.shape == t_shape + hmm.observation_dist.event_shape)
                    self.assertTrue(perm_array.shape == t_shape + (2,))
                    self.assertTrue(dist_array.shape == t_shape + (2,))
                    # self.assertTrue(entropy_array.shape == t_shape)
if __name__ == '__main__':
    # Allow running this test module directly with `python <file>`.
    unittest.main()
| 4,700 | 46.01 | 99 | py |
perm_hmm | perm_hmm-master/tests/postprocessing_tests.py | import unittest
import torch
import torch.distributions
import pyro.distributions as dist
from perm_hmm.policies.min_tree import MinEntPolicy
from perm_hmm.models.hmms import DiscreteHMM, PermutedDiscreteHMM
from perm_hmm.classifiers.interrupted import IIDInterruptedClassifier
from perm_hmm.training.interrupted_training import train_ic, exact_train_ic
from perm_hmm.postprocessing import ExactPostprocessor, EmpiricalPostprocessor
from perm_hmm.util import transpositions, num_to_data, ZERO
from perm_hmm.classifiers.generic_classifiers import MAPClassifier
class MyTestCase(unittest.TestCase):
    """Tests for the empirical and exact postprocessors on HMM classifications."""
    def setUp(self) -> None:
        # Pick num_testing_states distinct states out of num_states at random.
        self.num_states = 5
        self.num_testing_states = 3
        ts = torch.randint(self.num_states, (self.num_testing_states,))
        while len(ts.unique()) != len(ts):
            ts = torch.randint(self.num_states, (self.num_testing_states,))
        self.testing_states = ts
        dir = dist.Dirichlet(torch.ones(self.num_testing_states)/self.num_testing_states)
        not_states = torch.tensor(list(set(range(self.num_states)).difference(set(self.testing_states.tolist()))))
        # Resample until no tested state is too unlikely (< 0.1).
        il = dir.sample()
        while (il < 1e-1).any():
            il = dir.sample()
        # Initial distribution: Dirichlet mass on the tested states,
        # negligible (ZERO) mass on the rest.
        self.initial_logits = torch.empty(self.num_states)
        self.initial_logits[self.testing_states] = il
        self.initial_logits[not_states] = ZERO
        self.initial_logits = self.initial_logits.log()
        dir = dist.Dirichlet(torch.ones(self.num_states)/self.num_states)
        self.transition_logits = dir.sample((self.num_states,)).log()
        self.observation_probs = torch.rand((self.num_states,))
        self.observation_dist = dist.Bernoulli(self.observation_probs)
        # Identity plus every transposition of the state space.
        self.possible_perms = torch.stack([torch.arange(self.num_states)] + transpositions(self.num_states))
        self.bdhmm = PermutedDiscreteHMM(self.initial_logits,
                                         self.transition_logits,
                                         self.observation_dist)
        self.perm_policy = MinEntPolicy(self.possible_perms, self.bdhmm, save_history=True)
        self.shmm = DiscreteHMM(self.initial_logits,
                                self.transition_logits,
                                self.observation_dist)
    def test_postprocessing(self):
        """Confusion matrices from both postprocessors must have rows that
        normalize over the tested states and confidence bounds that bracket
        the point estimates, for MAP, permuted-MAP, and interrupted schemes."""
        max_t = 10
        num_samples = 1000
        # --- Empirical postprocessing: plain MAP classification. ---
        x, y = self.shmm.sample((num_samples, max_t))
        ground_truth = x[..., 0]
        log_post_dist = self.shmm.posterior_log_initial_state_dist(y)
        classifications = log_post_dist.argmax(-1)
        ep = EmpiricalPostprocessor(ground_truth, classifications)
        mr = ep.confusion_matrix(.95)
        print(mr)
        self.assertTrue(torch.allclose(mr[b"matrix"][self.testing_states].sum(-1), torch.tensor(1.)))
        self.assertTrue((mr[b"lower"][self.testing_states] <= mr[b"matrix"][self.testing_states]).all())
        self.assertTrue((mr[b"upper"][self.testing_states] >= mr[b"matrix"][self.testing_states]).all())
        # --- Empirical postprocessing: MAP under the min-entropy policy. ---
        v = self.perm_policy.get_perms(y)
        hist = self.perm_policy.calc_history
        b_log_post_dist = hist[b"belief"][..., -1, :].logsumexp(-1)
        b_classifications = b_log_post_dist.argmax(-1)
        bep = EmpiricalPostprocessor(ground_truth, b_classifications)
        mr = bep.confusion_matrix()
        print(mr)
        self.assertTrue(torch.allclose(mr[b"matrix"][self.testing_states].sum(-1), torch.tensor(1.)))
        self.assertTrue((mr[b"lower"][self.testing_states] <= mr[b"matrix"][self.testing_states]).all())
        self.assertTrue((mr[b"upper"][self.testing_states] >= mr[b"matrix"][self.testing_states]).all())
        # --- Empirical postprocessing: interrupted classifier, trained on a
        # fresh sample, distinguishing the darkest and brightest states. ---
        observation_params = self.observation_dist._param
        bright_state = observation_params.argmax(-1)
        dark_state = observation_params.argmin(-1)
        testing_states = torch.tensor([dark_state, bright_state])
        ic = IIDInterruptedClassifier(
            dist.Bernoulli(self.observation_probs[testing_states]),
            torch.tensor(1.),
            testing_states=testing_states,
        )
        train_x, train_y = self.shmm.sample((num_samples, max_t))
        ground_truth = train_x[..., 0]
        _ = train_ic(ic, train_y, train_x[..., 0])
        ic_results = ic.classify(y)
        ip = EmpiricalPostprocessor(
            ground_truth,
            ic_results
        )
        mr = ip.confusion_matrix()
        print(mr)
        self.assertTrue(torch.allclose(mr[b"matrix"][self.testing_states].sum(-1), torch.tensor(1.)))
        self.assertTrue((mr[b"lower"][self.testing_states] <= mr[b"matrix"][self.testing_states]).all())
        self.assertTrue((mr[b"upper"][self.testing_states] >= mr[b"matrix"][self.testing_states]).all())
        # --- Exact postprocessing: enumerate every binary outcome string. ---
        all_data = torch.stack([num_to_data(x, max_t) for x in range(2**max_t)])
        all_naive_post = self.shmm.posterior_log_initial_state_dist(all_data)
        naive_lp = self.shmm.log_prob(all_data)
        log_joint = all_naive_post.T + naive_lp
        map_class = MAPClassifier(self.shmm)
        classifications = map_class.classify(all_data)
        np = ExactPostprocessor(
            log_joint,
            classifications,
        )
        mr = np.log_misclassification_rate()
        conf = np.log_confusion_matrix()
        print(mr)
        self.assertTrue(conf[self.testing_states].logsumexp(-1).allclose(torch.tensor(0.), atol=1e-6))
        # --- Exact postprocessing with the min-entropy permutation policy. ---
        self.perm_policy.reset(save_history=True)
        bayes_results = self.perm_policy.get_perms(all_data)
        hist = self.perm_policy.calc_history
        phmm = self.bdhmm.expand_with_perm(bayes_results)
        b_map_class = MAPClassifier(phmm)
        lp = phmm.log_prob(all_data)
        plisd = phmm.posterior_log_initial_state_dist(all_data)
        b_log_joint = lp + plisd.T
        b_classifications = b_map_class.classify(all_data)
        bp = ExactPostprocessor(
            b_log_joint,
            b_classifications,
        )
        mr = bp.log_misclassification_rate()
        conf = bp.log_confusion_matrix()
        print(mr)
        self.assertTrue(conf[self.testing_states].logsumexp(-1).allclose(torch.tensor(0.), atol=1e-6))
        # --- Exact postprocessing with the interrupted classifier. ---
        _ = exact_train_ic(ic, all_data, log_joint)
        ic_results = ic.classify(all_data)
        ip = ExactPostprocessor(
            log_joint,
            ic_results,
        )
        ave = ip.log_misclassification_rate()
        conf = ip.log_confusion_matrix()
        print(mr)
        self.assertTrue(conf[self.testing_states].logsumexp(-1).allclose(torch.tensor(0.), atol=1e-6))
if __name__ == '__main__':
    # Allow running this test module directly with `python <file>`.
    unittest.main()
| 6,561 | 47.25 | 114 | py |
perm_hmm | perm_hmm-master/tests/ignore_transitions_tests.py | import pytest
import numpy as np
from scipy.special import logsumexp, log1p
import torch
import pyro.distributions as dist
from perm_hmm.util import num_to_data
from perm_hmm.policies.ignore_transitions import IgnoreTransitions
from perm_hmm.models.hmms import PermutedDiscreteHMM
from perm_hmm.classifiers.perm_classifier import PermClassifier
from perm_hmm.postprocessing import ExactPostprocessor
from perm_hmm.policies.min_tree import MinEntPolicy
from perm_hmm.rate_comparisons import exact_rates
from adapt_hypo_test.two_states import util, no_transitions as nt
from adapt_hypo_test.two_states.util import nx_to_log_probs, pq_to_m, m_to_r
@pytest.mark.parametrize('p, q, n', [
    (.1, .2, 5),
])
def test_ignore_transitions(p, q, n):
    """The IgnoreTransitions policy must reproduce the permutation schedule
    of the reference solver in adapt_hypo_test.two_states.no_transitions."""
    it = IgnoreTransitions(torch.eye(2, dtype=int), p, q, 0, 1)
    sigmas, chi = nt.solve(p, q, n)
    it.solve(n)
    # Same decision rule at every reachable log-odds point.
    assert all([np.all(s == sp) for s, sp in zip(sigmas, it.sigmas)])
    # Enumerate every possible length-n binary outcome string.
    yn = np.stack([num_to_data(num, n) for num in range(2**n)])
    x, applied = nt.evaluate_sigmas(sigmas, yn)
    it_applied = it.get_perms(torch.from_numpy(yn))
    # A chosen perm of [1, 0] means the swap was applied at that step.
    bool_it_applied = (it_applied == torch.tensor([1, 0], dtype=int).unsqueeze(-2).unsqueeze(-2)).all(-1)
    # The policy's choices match the solver's, shifted by one time step.
    assert np.all(applied[..., 1:] == bool_it_applied.numpy()[..., :-1])
@pytest.mark.parametrize('p, q, n', [
    (.1, .2, 5),
])
def test_it_rate(p, q, n):
    """Log joints, likelihoods, and the misclassification rate of the
    permuted HMM must agree with the closed-form reference solver."""
    # Two-state HMM with (numerically) identity transitions.
    il = (torch.ones(2)/2).log()
    tl = (torch.eye(2) + np.finfo(float).eps).log()
    tl -= tl.logsumexp(-1, keepdim=True)
    observation_dist = dist.Bernoulli(torch.tensor([p, 1-q]))
    hmm = PermutedDiscreteHMM(il, tl, observation_dist)
    cf = PermClassifier(hmm)
    # All length-n binary outcome strings.
    yn = np.stack([num_to_data(num, n) for num in range(2**n)])
    tyn = torch.from_numpy(yn)
    sigmas, chi = nt.solve(p, q, n)
    x, applied = nt.evaluate_sigmas(sigmas, yn)
    # Translate the solver's boolean "apply swap" flags into permutations,
    # rolled one step to match the HMM's timing convention.
    perms = torch.tensor([0, 1], dtype=int).expand(yn.shape + (2,)).clone().detach()
    perms[applied] = torch.tensor([1, 0], dtype=int)
    perms = torch.roll(perms, -1, dims=-2)
    c, d = cf.classify(tyn, perms=perms, verbosity=2)
    log_joint = hmm.posterior_log_initial_state_dist(tyn, perms).T + hmm.log_prob(tyn, perms)
    ntlj = nt.log_joint(yn.astype(int), util.pq_to_m(p, q), sigmas)
    assert np.allclose(ntlj, log_joint.numpy())
    lp = hmm.log_prob(tyn, perms)
    ntlp = nt.lp(yn.astype(int), util.pq_to_m(p, q), sigmas)
    assert np.allclose(ntlp, lp)
    ep = ExactPostprocessor(log_joint, c)
    r = ep.log_misclassification_rate()
    # Reference rate derived from the solver's success probabilities chi.
    rp = log1p(-np.exp(logsumexp(chi.ravel(), axis=-1) - np.log(2)))
    assert np.isclose(r.numpy(), rp)
def pq_to_hmm(p, q):
    """Build a two-state PermutedDiscreteHMM with identity transitions.

    :param p: Probability that the dark state (state 0) emits 1.
    :param q: Probability that the bright state (state 1) emits 0.
    :return: A PermutedDiscreteHMM with a uniform initial distribution and
        (numerically) identity transitions.
    """
    tl = (torch.eye(2) + np.finfo(float).eps).log()
    tl -= tl.logsumexp(-1, keepdim=True)
    # Uniform initial distribution, properly normalized in log space
    # (matches the construction used in test_it_rate; the previous
    # `torch.ones(2) - log(2)` left logsumexp == 1 instead of 0).
    il = (torch.ones(2) / 2).log()
    observation_dist = dist.Bernoulli(torch.tensor([p, 1-q]))
    return PermutedDiscreteHMM(il, tl, observation_dist)
def two_transitions():
    """Return the 2x2 integer identity matrix used as the perm table."""
    return torch.tensor([[1, 0], [0, 1]], dtype=int)
def me_policy(hmm):
    """Minimum-entropy policy over the two-state perm table, with history."""
    perms = two_transitions()
    return MinEntPolicy(perms, hmm, save_history=True)
def nt_policy(p, q):
    """IgnoreTransitions policy for states 0 (dark) and 1 (bright), with history."""
    perms = two_transitions()
    return IgnoreTransitions(perms, p, q, 0, 1, save_history=True)
@pytest.mark.parametrize("p, q, n", [
(.1, .2, 10),
(.3, .5, 8),
(.03, .8, 8),
])
def test_rates(p, q, n):
hmm = pq_to_hmm(p, q)
nt_s = nt_policy(p, q)
nt_s.solve(n)
all_data = np.stack([num_to_data(num, n) for num in range(2**n)]).astype(int)
nt_x, applied = nt.evaluate_sigmas(nt_s.sigmas, all_data)
me_s = me_policy(hmm)
nt_res = exact_rates(hmm, n, nt_s, verbosity=2)
me_res = exact_rates(hmm, n, me_s, verbosity=2)
assert nt_res[b"permuted_log_rate"] < me_res[b"permuted_log_rate"]
nontriv = (nt_res[b'permuted_extras'][b'perms'] != torch.arange(2)).any(-1)
total_applied = (nontriv.int().sum(-1) % 2).bool()
x = nt_res[b'permuted_extras'][b'history']['x'][:, -1, :].clone().detach()
assert np.all(nontriv.numpy()[..., :-1] == applied[..., 1:])
x[total_applied] = -x[total_applied]
nt_plisd = nx_to_log_probs(x, m_to_r(pq_to_m(p, q))).transpose()
res_plisd = nt_res[b'permuted_extras'][b'posterior_log_initial_state_dist'].numpy()
assert np.allclose(np.exp(nt_plisd), np.exp(res_plisd))
| 4,190 | 37.1 | 105 | py |
perm_hmm | perm_hmm-master/tests/loss_function_tests.py | import torch
import perm_hmm.loss_functions as lf
from perm_hmm.util import ZERO
def expanded_log_zero_one(state, classification):
    """Reference implementation of the expanded zero-one loss in log space.

    States and classifications are collapsed in pairs (integer division
    by 2). A pair mismatch yields log-loss 0 (loss 1); a pair match yields
    2 * log(ZERO) (negligible loss).
    """
    mismatch = (state // 2) != (classification // 2)
    values = mismatch.float()
    values[~mismatch] = ZERO
    log_values = values.log()
    log_values[~mismatch] = 2 * log_values[~mismatch]
    return log_values
def test_conditional_lzo():
    """The project loss function must agree with the reference implementation."""
    states = torch.tensor([
        [0, 4, 0, 3, 3, 3, 0],
        [0, 4, 0, 3, 3, 3, 1],
        [0, 4, 1, 3, 3, 2, 0],
    ], dtype=int)
    guesses = torch.tensor([
        [0, 4, 0, 2, 2, 3, 0],
        [0, 4, 0, 3, 3, 3, 0],
        [0, 4, 1, 3, 3, 2, 1],
    ], dtype=int)
    loss_fn = lf.expanded_log_zero_one(2)
    actual = loss_fn(states, guesses)
    expected = expanded_log_zero_one(states, guesses)
    assert actual.allclose(expected)
| 751 | 22.5 | 49 | py |
perm_hmm | perm_hmm-master/tests/perm_hmm_tests.py | import numpy as np
import torch
import pyro.distributions as dist
from perm_hmm.models.hmms import PermutedDiscreteHMM
from perm_hmm.policies.policy import PermPolicy
from perm_hmm.policies.min_tree import MinEntPolicy
from perm_hmm.policies.rotator_policy import RotatorPolicy, cycles
from perm_hmm.util import ZERO, all_strings, num_to_data, id_and_transpositions
class Shifter(PermPolicy):
    """Test policy whose permutation is the cycle indexed by the last outcome."""

    def __init__(self, hmm, save_history=False):
        self.num_states = hmm.initial_logits.shape[0]
        super().__init__(cycles(self.num_states), save_history=save_history)

    def reset(self, save_history=False):
        super().reset(save_history=save_history)

    def calculate_perm(self, data: torch.Tensor) -> (torch.Tensor, dict):
        # The observed outcome directly selects which cycle to apply.
        chosen = self.possible_perms[data.long()]
        return chosen, {}
def test_trivial_sample():
    """
    Check that the PermutedDiscreteHMM returns the expected sample for a
    permutation by checking a trivial model.
    """
    # Deterministic start in state 0 (up to ZERO mass elsewhere).
    initial_logits = torch.tensor([1.-ZERO, ZERO, ZERO, ZERO, ZERO]).log()
    initial_logits -= initial_logits.logsumexp(-1)
    # (Numerically) identity transition matrix.
    transition_logits = torch.full((5, 5), ZERO)
    transition_logits += torch.eye(5)
    transition_logits = transition_logits.log()
    transition_logits -= transition_logits.logsumexp(-1)
    # Observations deterministically reveal the state.
    output_probs = transition_logits.clone().detach().exp()
    observation_dist = dist.Categorical(output_probs)
    hmm = PermutedDiscreteHMM(initial_logits, transition_logits, observation_dist)
    perm_policy = RotatorPolicy(hmm)
    perm_policy.reset()
    s, o = hmm.sample((5,), perm_policy)
    # The rotator advances the (otherwise static) state by one each step.
    assert s.allclose(torch.arange(5))
    assert o.allclose(torch.arange(5))
def test_less_trivial_sample():
    """With a shift-by-one transition matrix plus a rotator policy, the
    state should advance by two each step (mod 5)."""
    initial_logits = torch.tensor([1.-ZERO, ZERO, ZERO, ZERO, ZERO]).log()
    initial_logits -= initial_logits.logsumexp(-1)
    # (Numerically) identity observation matrix: outcomes reveal the state.
    observation_logits = torch.full((5, 5), ZERO)
    observation_logits += torch.eye(5)
    observation_logits = observation_logits.log()
    observation_logits -= observation_logits.logsumexp(-1)
    output_probs = observation_logits.exp()
    observation_dist = dist.Categorical(output_probs)
    # Transitions shift the state by one.
    transition_logits = observation_logits.clone().detach()
    transition_logits = torch.roll(transition_logits, -1, dims=0)
    hmm = PermutedDiscreteHMM(initial_logits, transition_logits, observation_dist)
    perm_policy = RotatorPolicy(hmm)
    s, o = hmm.sample((5,), perm_policy)
    # One shift from the dynamics plus one from the rotator: step of two.
    assert s.allclose((2*torch.arange(5)) % 5)
    assert o.allclose((2*torch.arange(5)) % 5)
def test_data_dependent():
    """A data-dependent policy (Shifter) applied to a deterministic HMM must
    produce the analytically predicted outcome recursion, for any batch shape."""
    initial_logits = torch.tensor([1.-ZERO, ZERO, ZERO, ZERO, ZERO]).log()
    initial_logits -= initial_logits.logsumexp(-1)
    # (Numerically) identity observation matrix: outcomes reveal the state.
    observation_logits = torch.full((5, 5), ZERO)
    observation_logits += torch.eye(5)
    observation_logits = observation_logits.log()
    observation_logits -= observation_logits.logsumexp(-1)
    output_probs = observation_logits.exp()
    observation_dist = dist.Categorical(output_probs)
    # Transitions shift the state by one.
    transition_logits = observation_logits.clone().detach()
    transition_logits = torch.roll(transition_logits, -1, dims=0)
    hmm = PermutedDiscreteHMM(initial_logits, transition_logits, observation_dist)
    perm_policy = Shifter(hmm)
    s, o = hmm.sample((5,), perm_policy)
    # Expected recursion: shift by one (dynamics) then rotate by the
    # previous outcome (policy): o_{t} = 2*o_{t-1} + 1 (mod 5).
    op = torch.empty_like(o)
    op[0] = 0
    for i in range(1, 5):
        op[i] = (2*op[i-1]+1) % 5
    assert o.allclose(op)
    # The same recursion must hold per-run with a batch dimension.
    perm_policy.reset()
    s, o = hmm.sample((10, 5), perm_policy)
    assert o.allclose(op)
def test_posterior_log_initial_state_dist():
    """The policy's internally tracked belief must match the posterior
    computed directly by the HMM under the same permutations."""
    observation_probs = torch.tensor([.5, 1])
    observation_dist = dist.Bernoulli(observation_probs)
    possible_perms = torch.tensor([[0, 1], [1, 0]], dtype=int)
    # State 0 is (up to ZERO) absorbing; state 1 mixes uniformly.
    transition_logits = torch.tensor([[1 - ZERO, ZERO], [.5, .5]]).log().float()
    initial_logits = torch.tensor([.5, .5]).log()
    hmm = PermutedDiscreteHMM(initial_logits, transition_logits, observation_dist)
    perm_policy = MinEntPolicy(possible_perms, hmm)
    # data = torch.tensor([1, 1.0, 0, 1, 0, 0])
    data = torch.tensor([
        [1.0, 1, 1, 1],
        [1.0, 0, 0, 0],
        [1.0, 1, 0, 0],
    ])
    # data = torch.tensor([0.0, 1, 1])
    perm_policy.reset(save_history=True)
    perms = perm_policy.get_perms(data)
    d = perm_policy.calc_history
    # Marginalize the root of the policy's belief tree over its last axis.
    da = perm_policy.tree[0].logits.logsumexp(-1)
    dap = hmm.posterior_log_initial_state_dist(data, perms)
    assert torch.allclose(dap, da)
def apply_perm(seq, perm):
    """Apply the time-indexed permutations ``perm`` to the state sequence ``seq``.

    ``perm[t]`` is the permutation in force at step t; the result at step t
    is ``perm[t][seq[t]]``.
    """
    steps = torch.arange(len(seq))
    return perm[steps, seq]
def state_sequence_lp(seq, il, tl, perm):
    """Log-probability of a state sequence under initial logits ``il``,
    transition logits ``tl``, and per-step permutations ``perm``.

    Transitions at step t start from the permuted previous state.
    """
    steps = len(seq) - 1
    permuted = apply_perm(seq, perm)
    transitions = tl.expand((steps,) + tl.shape)
    trans_lp = transitions[torch.arange(steps), permuted[:-1], seq[1:]].sum(-1)
    return il[seq[0]] + trans_lp
def log_joint_at_seq(data, il, tl, od, seq, perm):
    """Joint log-probability of ``data`` together with the state sequence
    ``seq``, under observation distribution ``od``."""
    t = len(data)
    # Sum of per-step observation log-likelihoods given the visited states.
    obs_lp = od.log_prob(data[:, None])[torch.arange(t), seq].sum(-1)
    return state_sequence_lp(seq, il, tl, perm) + obs_lp
def brute_force_lp(data, il, tl, od, perm):
    """Marginal log-likelihood of ``data`` by explicit log-sum over every
    possible state sequence."""
    t = len(data)
    nstates = len(il)
    total = -float('inf')
    for seq in all_strings(t, base=nstates, dtype=int):
        term = log_joint_at_seq(data, il, tl, od, seq, perm).numpy()
        total = np.logaddexp(total, term)
    return total
def brute_force_jog_joint(data, il, tl, od, i, perm):
    # NOTE(review): "jog_joint" looks like a typo for "log_joint"; the name
    # is kept because it is part of this module's interface.
    # Computes log p(data, s_0 = i) by summing the joint over all state
    # sequences forced to start in state ``i``.
    n = len(data)
    retval = -float('inf')
    nstates = len(il)
    for seq in all_strings(n, base=nstates, dtype=int):
        # Prepend the fixed initial state to the enumerated tail.
        seq = torch.cat((torch.tensor([i]), seq))
        # NOTE(review): after prepending, ``seq`` has length n + 1, but
        # log_joint_at_seq indexes emissions with torch.arange(len(data));
        # the lengths look inconsistent — confirm against intended usage
        # (this helper appears unused in the visible tests).
        retval = np.logaddexp(retval, log_joint_at_seq(data, il, tl, od, seq, perm).numpy())
    return retval
def test_perm_log_prob():
    """Checks ``PermutedDiscreteHMM.log_prob`` against the exponential-time
    brute-force sum over hidden-state sequences, on one random binary string
    of length ``tmax`` with a randomly drawn 3-state model."""
    n = 3
    tmax = 5
    dirichlet = dist.Dirichlet(torch.ones(n) / n)
    initial_logits = (torch.ones(n) / n).log()
    transition_logits = dirichlet.sample((n,)).log()
    observation_dist = dist.Bernoulli(torch.rand(n))
    hmm = PermutedDiscreteHMM(initial_logits, transition_logits, observation_dist)
    perm_policy = MinEntPolicy(id_and_transpositions(n), hmm)
    # Draw one of the 2**tmax binary observation strings at random.
    i = torch.randint(2**tmax, (1,))
    data = num_to_data(i, tmax)
    data = data.unsqueeze(-2)
    perms = perm_policy.get_perms(data)
    lp = hmm.log_prob(data, perms)
    bflp = brute_force_lp(data.squeeze(), initial_logits, transition_logits, observation_dist, perms.squeeze())
    assert lp.double().isclose(torch.tensor(bflp))
def test_total_logprob():
    """Over all length-``tmax`` binary strings, the permuted HMM's
    ``log_prob`` should (a) normalize — its logsumexp over strings is 0 —
    and (b) agree with the brute-force sum over state sequences for every
    string."""
    n = 3
    tmax = 3
    dirichlet = dist.Dirichlet(torch.ones(n) / n)
    initial_logits = (torch.ones(n) / n).log()
    transition_logits = dirichlet.sample((n,)).log()
    observation_dist = dist.Bernoulli(torch.rand(n))
    hmm = PermutedDiscreteHMM(initial_logits, transition_logits, observation_dist)
    perm_policy = MinEntPolicy(id_and_transpositions(n), hmm)
    data = all_strings(tmax)
    perms = perm_policy.get_perms(data)
    lp = hmm.log_prob(data, perms)
    # Probabilities over all possible observation strings must sum to 1.
    assert torch.isclose(lp.double().logsumexp(-1), torch.tensor(0.).double(), atol=5e-7)
    bflp = torch.zeros_like(lp)
    for i, (dat, perm) in enumerate(zip(data, perms)):
        bflp[i] = brute_force_lp(dat.squeeze(), initial_logits, transition_logits, observation_dist, perm.squeeze())
    assert lp.double().allclose(bflp.double(), atol=5e-7)
| 7,129 | 36.925532 | 116 | py |
perm_hmm | perm_hmm-master/tests/tree_strategy_tests.py | import pytest
import numpy as np
import torch
import pyro.distributions as dist
from example_systems.three_states import three_state_hmm
from perm_hmm.models.hmms import PermutedDiscreteHMM
from perm_hmm.util import all_strings, id_and_transpositions, ZERO
from tests.min_ent import MinEntropyPolicy
from perm_hmm.policies.min_tree import MinTreePolicy
import perm_hmm.log_cost as cf
def simple_hmm():
    """Build a tiny two-state Bernoulli HMM fixture.

    Returns the HMM together with its candidate permutations (identity,
    transposition, and a second copy of the identity).
    """
    emission = dist.Bernoulli(torch.tensor([.5, 1]))
    init = torch.tensor([.5, .5]).log()
    trans = torch.tensor([[1 - ZERO, ZERO], [.5, .5]]).log()
    perms = torch.tensor([[0, 1], [1, 0], [0, 1]], dtype=int)
    return PermutedDiscreteHMM(init, trans, emission), perms
@pytest.mark.parametrize("hmm,possible_perms,num_steps",[
    simple_hmm() + (4,),
    (three_state_hmm(-3, -4), id_and_transpositions(3), 4),
    (three_state_hmm(-1, -4), id_and_transpositions(3), 4),
    (three_state_hmm(-5, -4), id_and_transpositions(3), 4),
])
def test_posterior_distributions(hmm, possible_perms, num_steps):
    """Checks that ``MinTreePolicy``'s belief tree tracks the same posterior
    distributions as the reference ``MinEntropyPolicy``, for every possible
    data string, one time step at a time."""
    num_states = hmm.initial_logits.shape[0]
    # Every possible observation string of length num_steps.
    all_data = all_strings(num_steps, num_states)
    mes2 = MinEntropyPolicy(possible_perms, hmm, save_history=True)
    mes1 = MinTreePolicy(possible_perms, hmm, cf.log_initial_entropy, 1, initialize_tree=False)
    mes1.initialize_tree(1, data_len=all_data.shape[0])
    # Maps a permutation (as a tuple) back to its row index in possible_perms.
    reverse_perm_dict = {tuple(v): k for k, v in enumerate(possible_perms.numpy().tolist())}
    for j in range(num_steps):
        mes1.tree.prune_tree(mes1.data_to_idx(all_data[..., j]))
        mes1.tree.grow(mes1.possible_perms)
        b = mes1.tree[-1]
        mes2.belief_state = mes2.belief_state.bayes_update(all_data[..., j])
        # Both policies must agree on the belief state after the Bayes update.
        assert np.allclose(mes2.belief_state.logits.exp().double().numpy(), mes1.tree.beliefs[0].logits.exp().double().numpy(), atol=1e-6)
        pl2 = mes2.distributions_for_all_perms()
        after_transition_2 = pl2.logsumexp(-1)
        after_transition_1 = mes1.tree[1].logits.transpose(0, 1)
        assert np.allclose(after_transition_1.exp().double().numpy(), after_transition_2.exp().double().numpy(), atol=1e-6)
        # Normalize, then permute axes so the two policies' layouts line up.
        # NOTE(review): the axis correspondence is taken on trust from the
        # moveaxis arguments — confirm against the policies' documented
        # tensor layouts.
        pl2 -= pl2.logsumexp(-3, keepdim=True).logsumexp(-2, keepdim=True)
        pl2 = torch.from_numpy(np.moveaxis(pl2.numpy(), (-1, -2, -3, -4, -5), (-4,-1,-2,-5,-3)))
        assert torch.allclose(pl2.exp().double(), b.logits.exp().double(), atol=1e-6)
        # Advance both policies with the same chosen permutation.
        perm = mes2.calculate_perm_from_belief(return_dict=False)
        perm_idx = torch.tensor([reverse_perm_dict[tuple(p.numpy())] for p in perm])
        mes1.tree.prune_tree(perm_idx)
        mes2.belief_state = mes2.belief_state.transition(perm.unsqueeze(-2))
@pytest.mark.parametrize("hmm,possible_perms,num_steps",[
    simple_hmm() + (4,),
    (three_state_hmm(-3, -4), id_and_transpositions(3), 4),
    (three_state_hmm(-1, -4), id_and_transpositions(3), 4),
    (three_state_hmm(-5, -4), id_and_transpositions(3), 4),
])
def test_posterior_entropy(hmm, possible_perms, num_steps):
    """Checks that the conditional entropies (and the distributions they
    derive from) computed by ``MinTreePolicy``'s tree costs agree with those
    of the reference ``MinEntropyPolicy``, at every step, for all data."""
    num_states = hmm.initial_logits.shape[0]
    all_data = all_strings(num_steps, num_states)
    mes2 = MinEntropyPolicy(possible_perms, hmm, save_history=True)
    mes1 = MinTreePolicy(possible_perms, hmm, cf.log_initial_entropy, 1, initialize_tree=False, save_history=True)
    mes1.initialize_tree(1, data_len=all_data.shape[0])
    # Maps a permutation (as a tuple) back to its row index in possible_perms.
    reverse_perm_dict = {tuple(v): k for k, v in enumerate(possible_perms.numpy().tolist())}
    for j in range(num_steps):
        mes2.belief_state = mes2.belief_state.bayes_update(all_data[..., j])
        mes1.tree.prune_tree(mes1.data_to_idx(all_data[..., j]))
        mes1.tree.grow(mes1.possible_perms)
        perm_tree, costs = mes1.tree.perm_idxs_from_log_cost(mes1.log_cost_func, return_costs=True)
        entropy2, distn2 = mes2.cond_entropies_for_all_perms(return_distn=True)
        distn1 = hmm.observation_dist.log_prob(hmm.enumerate_support(expand=False))
        # yk: predictive log probability of the next outcome.
        yk = (mes1.tree[-2].logits.logsumexp(-2).unsqueeze(-3) + distn1.unsqueeze(-2)).logsumexp(-1)
        distn1 = (yk.unsqueeze(-1).unsqueeze(-2) + mes1.tree[-1].logits)
        # NOTE(review): axis correspondence taken on trust from the moveaxis
        # arguments — confirm against the policies' documented layouts.
        distn2 = torch.tensor(np.moveaxis(distn2.numpy(), (-1, -2, -3, -4, -5), (-4,-1,-2,-5,-3)))
        assert torch.allclose(distn1.exp().double(), distn2.exp().double(), atol=1e-6)
        # Tree-cost recursion: the cost at one level is the outcome-weighted
        # logsumexp of the next level's costs.
        assert torch.allclose(costs[-2], (costs[-1] + yk).logsumexp(-2), atol=1e-6)
        s1yk = distn1.logsumexp(-1)
        jointent1 = -(s1yk.exp()*s1yk).sum(-3).sum(-1)
        s1 = s1yk.logsumexp(-1)
        yent1 = -(s1.exp()*s1).sum(-2)
        condent1 = (jointent1 - yent1).transpose(0, 1)
        assert torch.allclose(entropy2.double(), condent1.double(), atol=1e-6)
        plisd1 = mes1.tree[-1].logits.logsumexp(-1)
        log_postinitent = (-(plisd1.exp()*plisd1).sum(-1)).log()
        post_ent1 = (yk + log_postinitent).logsumexp(-2).exp().transpose(0, 1)
        assert torch.allclose(post_ent1.double(), entropy2.double(), atol=1e-6)
        entropy1 = costs[-2].transpose(0, 1).exp()
        assert torch.allclose(entropy1.double(), entropy2.double(), atol=1e-6)
        # Advance both policies with the same chosen permutation.
        perm = mes2.calculate_perm_from_belief(return_dict=False)
        perm_idx = torch.tensor([reverse_perm_dict[tuple(p.numpy())] for p in perm])
        mes1.tree.prune_tree(perm_idx)
        mes2.belief_state = mes2.belief_state.transition(perm.unsqueeze(-2))
# @pytest.mark.parametrize("n_states", [2, 3, 4])
# def test_min_tree_min_ent(n_states):
# n_steps = 5
# hmm = random_phmm(n_states)
# allowed_permutations = id_and_transpositions(n_states)
# mes1 = MinEntropyPolicy(allowed_permutations, hmm)
# mes2 = MinTreePolicy(allowed_permutations, hmm, vf.negative_min_entropy, 1)
# all_data = all_strings(n_steps)
# perms1 = mes1.get_perms(all_data)
# perms2 = mes2.get_perms(all_data)
# assert (perms1 == perms2).all()
| 5,907 | 52.225225 | 138 | py |
perm_hmm | perm_hmm-master/tests/test_min_ent_again.py | from functools import wraps
from functools import reduce
from operator import mul
import numpy as np
import pytest
import torch
import pyro.distributions as dist
from pyro.distributions.hmm import _logmatmulexp
from perm_hmm.models.hmms import PermutedDiscreteHMM
from typing import NamedTuple
from perm_hmm.util import all_strings, id_and_transpositions, ZERO, wrap_index
from example_systems.three_states import three_state_hmm
from tests.min_ent import MinEntropyPolicy as MES
class PostYPostS0(NamedTuple):
    r"""Pair of posterior distributions produced while scoring permutations.

    .. seealso:: return type of :py:meth:`PermutedDiscreteHMM.full_posterior`
    """

    log_post_y: torch.Tensor
    r"""Log posterior output distribution :math:`p(y_n | y^{n-1})`,
    shape ``(n_outcomes, n_perms)``."""

    log_post_init: torch.Tensor
    r"""Log posterior initial state distribution :math:`p(s_0 | y^{n-1})`,
    shape ``(n_outcomes, n_perms, state_dim)``."""
class GenDistEntropy(NamedTuple):
    """Expected posterior entropies bundled with the log posterior
    distributions that generated them.

    .. seealso:: the return type of
        :py:meth:`PermutedDiscreteHMM.expected_entropy`
    """

    log_dists: PostYPostS0
    """:py:class:`PostYPostS0`. The log distributions used to compute the
    posterior entropy."""

    expected_entropy: torch.Tensor
    """:py:class:`torch.Tensor`, float. The expected posterior entropy,
    shape ``(n_perms,)``."""
class BayesDistribution(object):
    """Base class for the distributions used to compute the posterior
    expected entropy.

    .. seealso:: Implementations :py:class:`BayesCurrentDistribution`,
        :py:class:`BayesCurrentCondInitialDistribution`,
        :py:class:`BayesInitialDistribution`
    """

    def __init__(self, log_distribution):
        # The log likelihoods for this distribution.
        self.logits = log_distribution

    def posterior(self, observation_logits):
        """Update the tracked distribution given new observation logits.

        Subclasses implement this with their own call signatures; every
        implementation requires at least ``observation_logits``, the log
        observation distribution.
        """
        raise NotImplementedError
class BayesInitialDistribution(BayesDistribution):
    r"""Tracks the prior log initial state distribution
    :math:`\log(p(s_0 | y^{i-1}))`, where :math:`y^{i-1}` is all the data
    seen so far.

    :param torch.Tensor logits: shape `` batch_shape + (state_dim,)``
    .. seealso:: Instantiated in :py:class:`PermutedDiscreteHMM`
    """

    def posterior(self, observation_logits, prior_s_cond_init):
        r"""Bayes-rule update of the posterior initial state distribution.

        .. math::
            p(s_0|y^i) &=
                \frac{p(y_i|s_0, y^{i-1}) p(s_0|y^{i-1})}{
                    \sum_{s_0} p(y_i|s_0, y^{i-1}) p(s_0|y^{i-1})
                } \\
            p(y_i|s_0, y^{i-1}) &=
                \sum_{s_i} p(y_i|s_i) p(s_i|s_0, y^{i-1}).

        :param torch.Tensor observation_logits:
            shape ``batch_shape + (1, 1, state_dim)``
        :param torch.Tensor prior_s_cond_init:
            shape ``batch_shape + (n_perms, state_dim, state_dim)``,
            where the last dimension is for s_{i+1}
        :returns: either a single posterior distribution or a whole
            posterior distribution tensor,
            shape ``batch_shape + (n_perms, state_dim)``
        """
        # Marginalize the current state out of p(y_i|s_i) p(s_i|s_0, y^{i-1}).
        log_evidence = (observation_logits + prior_s_cond_init).logsumexp(axis=-1)
        unnormalized = log_evidence + self.logits.unsqueeze(-2)
        return unnormalized - unnormalized.logsumexp(-1, keepdims=True)
class BayesCurrentDistribution(BayesDistribution):
    r"""
    Denoting the data seen thus far as :math:`y^{i-1}`,
    this class stores the distribution :math:`\log(p(s_i|y^{i-1}))`,
    for all possible permutations to be applied to :math:`s_{i-1}`.
    :param torch.Tensor logits: shape ``batch_shape + (num_perms, state_dim)``
    .. seealso:: Instantiated in :py:class:`PermutedDiscreteHMM`
    """
    def posterior(self, observation_logits, transition_logits,
                  previous_perm_index: torch.Tensor):
        r"""
        Computes the posterior current state distribution, according to
        Bayes rule.
        Denoting :math:`p(y|s) = b(y|s)` as the output distribution and
        :math:`p(s_j | s_i) = a_{ij}` as
        the transition matrix, the Bayes rule update is given by
        .. math::
            p(s_{i+1} | y^i) &= \sum_{s_{i}} a_{\sigma_i(y^i, s_{i})s_{i+1}}
                p(s_i | y^i) \\
            p(s_i | y^i) &= \frac{b(y_i | s_i) p(s_i | y^{i-1})}{
                \sum_{s_i}b(y_i | s_i)p(s_i | y^{i-1})}
        where we have :math:`p(s_i|y^{i-1})` already, and the permutation
        :math:`\sigma_i(y^i, s_i)` is yet to be determined, so
        we compute for all possibilities.
        :param torch.Tensor transition_logits: float. Log transition matrices.
            shape ``batch_shape + (num_perms, state_dim, state_dim)``
        :param torch.Tensor observation_logits: float. The output distributions.
            shape ``batch_shape + (1, 1, state_dim)``
        :param torch.Tensor previous_perm_index: The previously applied
            permutation index.
            shape ``batch_shape``
        :returns: tensor shape ``batch_shape + (num_perms, state_dim)``
        """
        # Select the slice of logits corresponding to the permutation that
        # was actually applied at the previous step.
        ind = wrap_index(previous_perm_index)
        post_s = self.logits[ind]
        post_s = post_s.unsqueeze(-2).unsqueeze(-2)
        # Bayes update with the new observation, then renormalize.
        post_s = post_s + observation_logits
        post_s = post_s - post_s.logsumexp(-1, keepdims=True)
        # Propagate through each candidate permuted transition matrix.
        post_s = _logmatmulexp(post_s.float(), transition_logits.float())
        return post_s.squeeze(-2)
    def posterior_y(self, observation_logits):
        r"""
        Computes :math:`p(y_i|y^{i-1})` for all possible permutations to be
        applied to :math:`s_{i-1}`.
        From the equation
        .. math::
            p(y_i|y^{i-1}) = \sum_{s_i} b(y_i|s_i) p(s_i|y^{i-1})
        :param observation_logits:
            shape ``batch_shape + (n_outcomes, 1, 1, state_dim)``
        :returns: shape ``batch_shape + (n_outcomes, n_perms)``
        """
        return (self.logits.unsqueeze(-2) + observation_logits).logsumexp(-1).squeeze(-1)
class BayesCurrentCondInitialDistribution(BayesDistribution):
    r"""
    Stores :math:`p(s_i | s_0, y^{i-1})`,
    for all possible permutations that could be applied to :math:`s_{i-1}`.
    :param torch.Tensor logits:
        shape ``batch_shape + (n_perms, state_dim, state_dim)``,
        the last dimension is for :math:`s_i`,
        and the second to last dimension is for :math:`s_0`.
    .. seealso:: Instantiated in :py:class:`PermutedDiscreteHMM`
    """
    def posterior(self, observation_logits, transition_logits,
                  previous_perm_index: torch.Tensor):
        r"""
        Computes the posterior for all possible permutations.
        Denoting :math:`p(y|s) = b(y|s)` as the output distribution and
        :math:`p(s_j | s_i) = a_{ij}` as
        the transition matrix, the Bayes rule update is given by
        .. math::
            p(s_{i+1} | s_0, y^i) &= \sum_{s_i} a_{\sigma_i(y^i, s_i), s_{i+1}}
                p(s_i | s_0, y^i) \\
            p(s_i| s_0, y^i) &= \frac{b(y_i|s_i) p(s_i | s_0 y^{i-1})}{
                \sum_{s_i} b(y_i|s_i) p(s_i|s_0, y^{i-1})}
        where we have :math:`p(s_i|s_0, y^{i-1})` already, and the permutation
        :math:`\sigma_i(y^i, s_i)` is yet to be determined, so
        we compute for all possibilities.
        :param torch.Tensor transition_logits: float.
            shape ``batch_shape + (num_perms, state_dim, state_dim)``
        :param torch.Tensor observation_logits: float.
            shape ``batch_shape + (1, 1, state_dim)``
        :param torch.Tensor previous_perm_index: int.
            The index which encodes the previous permutation.
            shape ``batch_shape``
        :returns: shape ``batch_shape + (num_perms, state_dim, state_dim)``
        """
        # Select the slice corresponding to the previously applied permutation.
        ind = wrap_index(previous_perm_index)
        post_s_cond_init = self.logits[ind]
        post_s_cond_init = post_s_cond_init.unsqueeze(-3)
        # Bayes update over s_i with the new observation, then renormalize
        # along the s_i dimension (conditional on each s_0).
        post_s_cond_init = post_s_cond_init + observation_logits
        post_s_cond_init = post_s_cond_init - post_s_cond_init.logsumexp(axis=-1, keepdims=True)
        # Propagate through each candidate permuted transition matrix.
        post_s_cond_init = _logmatmulexp(
            post_s_cond_init.float(),
            transition_logits.float()
        )
        return post_s_cond_init
class PermSelector(object):
    """
    This is an abstract class that is used to select permutations. The
    get_perm method is called in-line when sampling with PermutedDiscreteHMM.
    The get_perms method uses the get_perm method to compute all the
    permutations that would be chosen for all possible runs of data.
    See _perm_selector_template for an example of subclassing.
    """
    def __init__(self, possible_perms, save_history=False):
        """
        :param torch.Tensor possible_perms: int. One allowed permutation per
            row. shape ``(n_perms, n_states)``
        :param bool save_history: Whether to record intermediate quantities
            computed while selecting permutations.
        :raises ValueError: if any row of ``possible_perms`` is not a
            permutation of ``[0, ..., n_states - 1]``.
        """
        n_perms, n_states = possible_perms.shape
        # A row is a valid permutation iff sorting it recovers arange(n_states).
        if not (possible_perms.long().sort(-1).values ==
                torch.arange(n_states, dtype=torch.long).expand(
                    (n_perms, n_states)
                )).all():
            raise ValueError("The input permutations are not permutations of "
                             "the integers [0, ..., n_states]")
        self.possible_perms = possible_perms
        self._calc_history = {}
        self._perm_history = []
        self.shape = None
        self.save_history = save_history

    @classmethod
    def manage_shape(cls, get_perm):
        """
        A decorator provided to flatten the batch dimensions of the input.
        :param get_perm: Permutation method to decorate.
        :return: Decorated method.
        """
        @wraps(get_perm)
        def _wrapper(self, *args, **kwargs):
            event_dims = kwargs.get("event_dims", 0)
            try:
                data_shape = args[0].shape
                shape = data_shape[:len(data_shape) - event_dims]
            except (AttributeError, IndexError):
                shape = None
            self_shape = getattr(self, "shape", None)
            # Remember the first batch shape seen for this run.
            if (self_shape is None) and (shape is not None):
                self.shape = shape
            data = args[0]
            if shape is not None:
                # Flatten all batch dims into one before calling get_perm.
                data = data.reshape((reduce(mul, self.shape, 1),) + data_shape[len(data_shape) - event_dims:])
            perm = get_perm(self, data, *args[1:], **kwargs)
            if shape is not None:
                # Restore the original batch shape on the returned perms.
                perm = perm.reshape(shape + perm.shape[-1:])
            return perm
        return _wrapper

    @classmethod
    def manage_calc_history(cls, get_perm):
        """
        WARNING: This decorator changes the return signature of the decorated method.
        Given a method which returns a tuple whose first element is a permutation and whose
        second element is a dictionary containing ancillary information which is computed to
        compute the permutation, returns a method which returns only the permutation, while
        appending the ancillary information to self._calc_history.
        :param get_perm: Method to compute the next permutation.
        :return: A method which returns only the permutation.
        ..seealso:: :py:meth:`perm_hmm.strategies.min_ent.MinEntropySelector.get_perm`
        """
        @wraps(get_perm)
        def _wrapper(self, *args, **kwargs):
            save_history = getattr(self, "save_history", False)
            retval = get_perm(self, *args, **kwargs)
            perm, calc_history = retval
            if save_history:
                for k, v in calc_history.items():
                    try:
                        self._calc_history[k].append(v)
                    except KeyError:
                        self._calc_history[k] = [v]
            return perm
        return _wrapper

    @classmethod
    def manage_perm_history(cls, get_perm):
        """
        Appends the permutation to self._perm_history.
        :param get_perm: Method to get the next permutation. Should return only a permutation.
        :return: Same method.
        """
        @wraps(get_perm)
        def _wrapper(self, *args, **kwargs):
            perm = get_perm(self, *args, **kwargs)
            self._perm_history.append(perm)
            return perm
        return _wrapper

    @property
    def perm_history(self):
        """The permutations applied so far, stacked along a time dim when
        possible; falls back to the raw list when stacking fails."""
        if len(self._perm_history) == 0:
            return torch.Tensor()
        else:
            try:
                toret = torch.stack(self._perm_history, dim=-2)
            except RuntimeError:
                return self._perm_history
            return toret

    @perm_history.setter
    def perm_history(self, val):
        self._perm_history = val

    @perm_history.deleter
    def perm_history(self):
        del self._perm_history

    @property
    def calc_history(self):
        """Ancillary quantities recorded by manage_calc_history, reshaped to
        the run's batch shape and stacked along time when possible."""
        if len(self._calc_history) == 0:
            return self._calc_history
        if any([len(v) == 0 for v in self._calc_history.values()]):
            return self._calc_history
        if self.shape is None:
            return self._calc_history
        try:
            return {k: torch.stack([x.reshape(self.shape + x.shape[1:]) for x in v], dim=-v[0].ndim) for k, v in self._calc_history.items()}
        except RuntimeError:
            return self._calc_history

    @calc_history.setter
    def calc_history(self, val):
        self._calc_history = val

    @calc_history.deleter
    def calc_history(self):
        del self._calc_history

    def get_perm(self, data: torch.Tensor, shape=()):
        """
        Takes a (vectorized) input of data from a single time step,
        and returns a (correspondingly shaped) permutation.
        :param torch.Tensor data: Data from the HMM.
            shape ``sample_shape + batch_shape + hmm.observation_dist.event_shape``
        :return: The permutation to be applied at the next time step.
            shape ``(n_batches, n_states)``
        """
        raise NotImplementedError

    def reset(self, save_history=False):
        """Clears all per-run state so the selector can be reused."""
        self.shape = None
        self._perm_history = []
        self.save_history = save_history
        self._calc_history = {}

    def get_perms(self, data, time_dim=-1):
        r"""
        Given a run of data, returns the permutations which would be applied.
        This should be used to precompute the permutations for a given model
        and given data sequence.
        :param torch.Tensor data: float.
            The sequence of data to compute the optimal permutations for
            shape ``batch_shape + (time_dim,)``
        :returns: A :py:class:`torch.Tensor` type :py:class:`int`
            containing the optimal permutations to have applied.
            shape ``batch_shape + (time_dim, num_states)``
        """
        d_shape = data.shape
        m = len(d_shape)
        if time_dim < 0:
            obs_event_dim = -(time_dim + 1)
        else:
            obs_event_dim = m - (time_dim + 1)
        shape = d_shape[:m - obs_event_dim]
        max_t = shape[-1]
        perms = []
        for i in range(max_t):
            perms.append(self.get_perm(
                data[(..., i) + (
                    slice(None),) * obs_event_dim],
            ))
        perms = torch.stack(perms, -2)
        # Fix: the stacked permutations were computed but never returned,
        # even though the docstring promises a return value.
        return perms
class MinEntropyPolicy(PermSelector):
    """
    A strategy for selecting permutations by choosing the one which gives the minimum
    expected posterior entropy of the initial state distribution given the
    past data and the next step of data, as yet unseen.
    """
    def __init__(self, possible_perms, hmm, save_history=False):
        # TODO: Fix this class to work with heterogeneous hmms
        super().__init__(possible_perms, save_history=save_history)
        self.hmm = hmm
        self.step = 0
        n_perms = len(possible_perms)
        # BayesInitialDistribution: used to compute posterior initial state
        # distributions.
        self.prior_log_inits = \
            BayesInitialDistribution(self.hmm.initial_logits.clone().detach())
        # BayesCurrentDistribution: used to compute distributions of the
        # form p(s_n | y^{i-1}), one row per candidate permutation.
        self.prior_log_current = \
            BayesCurrentDistribution(
                self.hmm.initial_logits.clone().detach().repeat(n_perms, 1)
            )
        # p(s_i | s_0, y^{i-1}) starts as a ZERO-smoothed identity matrix
        # (s_i = s_0 with probability ~1 before any data).
        prior_log_cur_cond_init = \
            (torch.eye(len(self.hmm.initial_logits)) + ZERO).log()
        prior_log_cur_cond_init -= \
            prior_log_cur_cond_init.logsumexp(axis=-1, keepdim=True)
        self.prior_log_cur_cond_init = \
            BayesCurrentCondInitialDistribution(
                prior_log_cur_cond_init.repeat(n_perms, 1, 1)
            )

    @property
    def reverse_perm_dict(self):
        # Maps a permutation (as a tuple) to its index in possible_perms.
        return {
            tuple(val.tolist()): torch.tensor(key, dtype=torch.long)
            for key, val in enumerate(self.possible_perms)
        }

    @reverse_perm_dict.setter
    def reverse_perm_dict(self, val):
        self.possible_perms = torch.stack(tuple(val.values()))

    def to_perm_index(self, perm):
        """
        Dualizes a permutation to its index in the :attr:`possible_perms`
        array.
        :param torch.Tensor perm: int. The perm to convert to an index.
            shape ``batch_shape + (state_dim,)``
        :returns: :py:class:`torch.Tensor`, int.
            shape ``batch_shape``
        """
        shape = perm.shape
        if len(shape) == 1:
            return self.reverse_perm_dict[tuple(perm.tolist())]
        flat_shape = (reduce(mul, shape[:-1]),) + shape[-1:]
        re_perm = perm.reshape(flat_shape)
        pi = torch.empty(flat_shape[:-1], dtype=torch.long)
        for i in range(re_perm.shape[0]):
            pi[i] = self.reverse_perm_dict[tuple(re_perm[i].tolist())]
        # Fix: Tensor.reshape returns a new tensor; the original discarded
        # the result, so multi-dim batch shapes came back flattened.
        pi = pi.reshape(shape[:-1])
        return pi

    def reset(self, save_history=False):
        """
        Resets the policy.
        """
        super().reset(save_history=save_history)
        n_perms = len(self.possible_perms)
        self.prior_log_inits.logits = self.hmm.initial_logits.clone().detach()
        self.prior_log_current.logits = \
            self.hmm.initial_logits.clone().detach().repeat(n_perms, 1)
        log_state_cond_initial_dist = \
            (torch.eye(len(self.hmm.initial_logits)) + ZERO).log()
        log_state_cond_initial_dist -= \
            log_state_cond_initial_dist.logsumexp(axis=-1, keepdim=True)
        self.prior_log_cur_cond_init.logits = \
            log_state_cond_initial_dist.repeat(n_perms, 1, 1)
        self.step = 0

    def update_prior(self, val):
        """
        Given a new observation and the permutation applied last,
        updates all the distributions being tracked.
        :param torch.Tensor val: torch.float an observed data point.
            This is :math:`y_i`.
            shape ``batch_shape``
        """
        n_states = len(self.hmm.initial_logits)
        shape = val.shape
        if len(self._perm_history) == 0:
            # First observation: broadcast the priors over the batch and act
            # as if the identity permutation had been applied previously.
            total_batches = shape[0]
            self.prior_log_current.logits = \
                self.prior_log_current.logits.expand(total_batches, -1, -1)
            self.prior_log_cur_cond_init.logits = \
                self.prior_log_cur_cond_init.logits.expand(
                    total_batches, -1, -1, -1)
            prev_perm = torch.arange(n_states, dtype=int)
        else:
            prev_perm = self._perm_history[-1]
        prev_perm = prev_perm.expand((shape[0],) + (n_states,))
        prev_perm_index = self.to_perm_index(prev_perm)
        transition_logits = self.hmm.transition_logits[self.possible_perms]
        if len(self.hmm.observation_dist.batch_shape) >= 2:
            # Heterogeneous HMM: select the output distribution for this step.
            observation_logits = self.hmm.observation_dist.log_prob(
                val.unsqueeze(-1).unsqueeze(-1)
            )[..., self.step, :].unsqueeze(-2).unsqueeze(-2)
        else:
            observation_logits = \
                self.hmm.observation_dist.log_prob(
                    val.unsqueeze(-1)
                ).unsqueeze(-2).unsqueeze(-2)
        prior_s_cond_init = self.prior_log_cur_cond_init.logits
        post_log_initial_dist = \
            self.prior_log_inits.posterior(
                observation_logits,
                prior_s_cond_init
            )
        ind = wrap_index(prev_perm_index)
        self.prior_log_inits.logits = post_log_initial_dist[ind]
        post_log_state_dist = \
            self.prior_log_current.posterior(observation_logits,
                                             transition_logits, prev_perm_index)
        self.prior_log_current.logits = post_log_state_dist
        post_log_state_cond_initial_dist = \
            self.prior_log_cur_cond_init.posterior(observation_logits,
                                                   transition_logits,
                                                   prev_perm_index)
        self.prior_log_cur_cond_init.logits = post_log_state_cond_initial_dist

    def full_posterior(self):
        r"""
        Computes the distributions needed to compute the posterior conditional
        entropy, which depends on yet to be seen data.
        :returns: a :py:class:`PostYPostS0` object, containing
            log_post_y: the posterior distribution
                :math:`\log(p(y_i | y^{i-1}))`
                shape ``(n_outcomes, n_perms)``
            log_post_init: the posterior distribution
                :math:`\log(p(s_0| y_i, y^{i-1}))`.
                shape ``(n_outcomes, n_perms, state_dim)``
        .. seealso:: method :py:meth:`PermutedDiscreteHMM.expected_entropy`
        """
        possible_outputs = \
            self.hmm.observation_dist.enumerate_support(False) \
            .squeeze().unsqueeze(-1)
        observation_logits = \
            self.hmm.observation_dist.log_prob(
                possible_outputs,
            ).float().unsqueeze(-2).unsqueeze(-2)
        # Align the outcome dimension with the batch dims of the priors.
        for x in range(len(self.prior_log_inits.logits.shape) - 1):
            observation_logits.unsqueeze_(-2)
        log_post_y = self.prior_log_current.posterior_y(observation_logits)
        log_post_init = \
            self.prior_log_inits.posterior(
                observation_logits,
                self.prior_log_cur_cond_init.logits,
            )
        return PostYPostS0(log_post_y, log_post_init)

    def expected_entropy(self, output_distributions=False):
        r"""
        Computes the expected conditional entropy for all the permutations.
        :param bool output_distributions: indicates whether to return the
            tensor of posterior log initial state distributions and posterior
            log y distributions along with the entropy.
        :returns: Either a torch.Tensor of shape ``(n_perms,)`` or a
            :py:class:`GenDistEntropy` object, containing as its leaves,
            log_dists.log_post_y: Posterior log y distribution, i.e.
                :math:`\log(p(y_i | y^{i-1}))`.
                shape ``(n_outcomes, n_perms)``
            log_dists.log_post_init: Posterior log initial state distribution.
                i.e. :math:`\log(p(s_0 | y_i, y^{i-1}))`
                shape ``(n_outcomes, n_perms, state_dim)``
            expected_entropy: expected posterior entropy for each possible
                permutation.
                shape ``(n_perms,)``
        .. seealso:: method :py:meth:`PermutedDiscreteHMM.full_posterior`
        """
        postyposts1 = self.full_posterior()
        plid = postyposts1.log_post_init
        plyd = postyposts1.log_post_y
        # Joint log p(s_0, y_i | y^{i-1}); entropy is averaged over outcomes.
        pliyd = plid + plyd.unsqueeze(-1)
        entropy = (-pliyd.exp() * plid).sum(axis=-1).sum(axis=0)
        if output_distributions:
            return GenDistEntropy(PostYPostS0(plyd, plid), entropy)
        return entropy

    def calculate_perm(self, data: torch.Tensor) -> (torch.Tensor, dict):
        r"""
        Given data, returns the permutation which should be applied to the HMM before the next step, based on a minimum
        posterior entropy heuristic.
        :param data: Data from the HMM, used to update the computed distributions.
        :return: A tuple. First element is
            perm: :py:class:`torch.Tensor`
                dtype :py:class:`int`,
                Next permutation to apply.
                shape ``batch_shape + (state_dim,)``
            Second element is a dict, containing keys
            b"dist_array": A
                :py:class:`torch.Tensor` containing :math:`\log(p(s_0|y^i))`
                shape ``batch_shape + (state_dim,)``
            b"entropy_array": A :py:class:`torch.Tensor`
                containing
                :math:`\operatorname{min}_{\sigma}H_\sigma(S_0|Y^i, y^{i-1})`
                shape ``batch_shape``
        """
        self.update_prior(data)
        entropy = self.expected_entropy()
        # Greedy choice: permutation minimizing the expected entropy.
        entropy_array, perm_index = entropy.min(dim=-1)
        perm = self.possible_perms[perm_index]
        self.step += 1
        return perm, {b"dist_array": self.prior_log_inits.logits.clone().detach(), b"entropy_array": entropy_array}

    def _calculate_dists(self, data, perms):
        # Replays a run with the given perms injected, returning the recorded
        # posterior initial state distributions at each step.
        shape = perms.shape[:-1]
        if not data.shape[:len(shape)] == shape:
            raise ValueError("Data and permutations must have same shape")
        try:
            _ = self.hmm.log_prob(data)
        except (ValueError, RuntimeError) as e:
            raise ValueError("Data does not have a compatible shape") from e
        lperms = [torch.tensor(x) for x in perms.tolist()]
        self.reset(save_history=True)
        for i in range(shape[-1]):
            # get_perm is called for its side effect on calc_history; the
            # chosen perm is then overridden by the injected history.
            perm = self.get_perm(data[(..., i) + (slice(None),)*(len(data.shape)-len(shape))])
            self._perm_history = lperms[:i]
        return self.calc_history[b"dist_array"]
def simple_hmm():
    """Two-state Bernoulli HMM fixture plus its candidate permutations
    (identity, transposition, and a repeated identity)."""
    init_logits = torch.tensor([.5, .5]).log()
    trans_logits = torch.tensor([[1 - ZERO, ZERO], [.5, .5]]).log()
    out_dist = dist.Bernoulli(torch.tensor([.5, 1]))
    hmm = PermutedDiscreteHMM(init_logits, trans_logits, out_dist)
    allowed = torch.tensor([[0, 1], [1, 0], [0, 1]], dtype=int)
    return hmm, allowed
@pytest.mark.parametrize("hmm,possible_perms,num_steps", [
    simple_hmm() + (4,),
    (three_state_hmm(-3, -4), id_and_transpositions(3), 4),
    (three_state_hmm(-1, -4), id_and_transpositions(3), 4),
    (three_state_hmm(-5, -4), id_and_transpositions(3), 4),
])
def test_posterior_distributions(hmm, possible_perms, num_steps):
    """Checks that this module's ``MinEntropyPolicy`` tracks the same
    posterior output and initial-state distributions as the reference
    implementation ``MES``, for all data strings, step by step."""
    num_states = hmm.initial_logits.shape[0]
    # Every possible observation string of length num_steps.
    all_data = all_strings(num_steps, num_states)
    mes1 = MinEntropyPolicy(possible_perms, hmm, save_history=True)
    mes2 = MES(possible_perms, hmm, save_history=True)
    for j in range(num_steps):
        mes1.update_prior(all_data[..., j])
        ply, pli = mes1.full_posterior()
        mes2.belief_state = mes2.belief_state.bayes_update(all_data[..., j])
        pl = mes2.distributions_for_all_perms()
        # Marginalize and permute axes so MES's layout matches mes1's.
        # NOTE(review): axis correspondence taken on trust from the moveaxis
        # arguments — confirm against the policies' documented layouts.
        ply2 = pl.logsumexp(-3).logsumexp(-2)
        ply2 = torch.tensor(np.moveaxis(ply2.numpy(), (-1, -2, -3), (-3, -1, -2)))
        pli2 = pl.logsumexp(-2)
        pli2 = pli2 - pli2.logsumexp(-2, keepdim=True)
        pli2 = torch.tensor(np.moveaxis(pli2.numpy(), (-1, -2, -3, -4), (-4, -1, -2, -3)))
        assert torch.allclose(ply.exp().double(), ply2.exp().double(), atol=1e-6)
        assert torch.allclose(pli.exp().double(), pli2.exp().double(), atol=1e-6)
        # Advance both policies with the same chosen permutation.
        perm = mes2.calculate_perm_from_belief(return_dict=False)
        mes1._perm_history.append(perm)
        mes2.belief_state = mes2.belief_state.transition(perm.unsqueeze(-2))
@pytest.mark.parametrize("hmm,possible_perms,num_steps",[
    simple_hmm() + (4,),
    (three_state_hmm(-3, -4), id_and_transpositions(3), 4),
    (three_state_hmm(-1, -4), id_and_transpositions(3), 4),
    (three_state_hmm(-5, -4), id_and_transpositions(3), 4),
])
def test_posterior_entropy(hmm, possible_perms, num_steps):
    """Checks that the expected conditional entropies computed by this
    module's ``MinEntropyPolicy`` agree with the reference implementation
    ``MES``, for all data strings, step by step."""
    num_states = hmm.initial_logits.shape[0]
    all_data = all_strings(num_steps, num_states)
    mes1 = MinEntropyPolicy(possible_perms, hmm, save_history=True)
    mes2 = MES(possible_perms, hmm, save_history=True)
    for j in range(num_steps):
        mes1.update_prior(all_data[..., j])
        entropy1 = mes1.expected_entropy()
        mes2.belief_state = mes2.belief_state.bayes_update(all_data[..., j])
        entropy2 = mes2.cond_entropies_for_all_perms()
        assert torch.allclose(entropy1.double(), entropy2.double(), atol=1e-6)
        # Advance both policies with the same chosen permutation.
        perm = mes2.calculate_perm_from_belief(return_dict=False)
        mes1._perm_history.append(perm)
        mes2.belief_state = mes2.belief_state.transition(perm.unsqueeze(-2))
# @pytest.mark.parametrize("hmm,possible_perms,num_steps",[
# simple_hmm() + (4,),
# (three_state_hmm(-3, -4), id_and_transpositions(3), 4),
# (three_state_hmm(-1, -4), id_and_transpositions(3), 4),
# (three_state_hmm(-5, -4), id_and_transpositions(3), 4),
# ])
# def test_posterior_perms(hmm, possible_perms, num_steps):
# num_states = hmm.initial_logits.shape[0]
# all_data = all_strings(num_steps, num_states)
# mes1 = MinEntropyPolicy(possible_perms, hmm, save_history=True)
# mes2 = MES(possible_perms, hmm, save_history=True)
# for j in range(num_steps):
# perm1 = mes1.get_perm(all_data[..., j])
# perm2 = mes2.get_perm(all_data[..., j])
# assert torch.all(perm1 == perm2)
# This test fails because of numerical precision issues.
# @pytest.mark.parametrize("hmm,possible_perms,num_steps",[
# simple_hmm() + (4,),
# (three_state_hmm(-3, -4), id_and_transpositions(3), 4),
# (three_state_hmm(-1, -4), id_and_transpositions(3), 4),
# (three_state_hmm(-5, -4), id_and_transpositions(3), 4),
# ])
# def test_min_ent_consistent(hmm, possible_perms, num_steps):
# all_data = all_strings(num_steps)
# mes1 = min_ent.MinEntropyPolicy(possible_perms, hmm, save_history=True)
# mes2 = min_ent_again.MinimumEntropyPolicy(possible_perms, hmm, save_history=True)
# all_perms_1 = mes1.get_perms(all_data)
# all_perms_2 = mes2.get_perms(all_data)
# e1 = mes1.calc_history[b"entropy_array"]
# e2 = mes2.calc_history[b"entropy"]
# assert (all_perms_1 == all_perms_2).all()
# assert torch.stack(e2).T.allclose(e1, atol=1e-6)
| 31,332 | 37.778465 | 140 | py |
perm_hmm | perm_hmm-master/tests/perm_selector_tests.py | import pytest
import unittest
from copy import deepcopy
import numpy as np
import torch
import pyro.distributions as dist
from perm_hmm.models.hmms import DiscreteHMM, PermutedDiscreteHMM
from perm_hmm.policies.min_tree import MinEntPolicy
from perm_hmm.util import bin_ent, ZERO, perm_idxs_from_perms
def get_marginals_and_conditionals(perm_policy):
    """Apply every candidate permutation to the policy's belief state and
    return (initial-state marginal, current-state marginal, current-given-
    initial conditional), all as log-probabilities."""
    post_perm = perm_policy.belief_state.transition(perm_policy.possible_perms)
    joint = post_perm.logits
    # Normalize over the current-state axis to get P(current | initial).
    conditional_current = joint - joint.logsumexp(-1, keepdim=True)
    marginal_current = joint.logsumexp(-2)
    marginal_init = joint.logsumexp(-1)[:, 0, :]
    return marginal_init, marginal_current, conditional_current
# NOTE(review): this looks like an orphaned unittest ``setUp`` fixture -- it is
# defined at module level (no enclosing TestCase is visible) and nothing in the
# pytest-style tests below calls it; confirm whether it can be removed.
def setUp(self):
    # Two-state chain: state 0 emits 0/1 uniformly, state 1 always emits 1.
    self.observation_probs = torch.tensor([.5, 1])
    self.observation_dist = dist.Bernoulli(self.observation_probs)
    # Identity permutation and the single transposition of two states.
    self.possible_perms = torch.tensor([[0, 1],
                                        [1, 0]], dtype=int)
    # State 0 is (numerically) absorbing; state 1 mixes uniformly.
    self.transition_logits = torch.tensor([[1-ZERO, ZERO], [.5, .5]]).log().float()
    self.initial_logits = torch.tensor([.5, .5]).log()
    self.bdhmm = PermutedDiscreteHMM(self.initial_logits,
                                     self.transition_logits,
                                     self.observation_dist)
    self.shmm = DiscreteHMM(self.initial_logits,
                            self.transition_logits,
                            self.observation_dist)
    self.perm_policy = MinEntPolicy(self.possible_perms, self.bdhmm)
    self.integration_time = 1
# Shared module-level fixtures for the pytest-style tests below.
# Same two-state model as the fixture above: state 0 is (numerically)
# absorbing and emits 0/1 uniformly; state 1 mixes uniformly, always emits 1.
transition_logits = torch.tensor([[1 - ZERO, ZERO], [.5, .5]]).log()
initial_logits = torch.tensor([.5, .5]).log()
observation_probs = torch.tensor([.5, 1])
observation_dist = dist.Bernoulli(observation_probs)
my_hmm = PermutedDiscreteHMM(initial_logits, transition_logits, observation_dist)
pyro_hmm = DiscreteHMM(initial_logits, transition_logits, observation_dist)
possible_perms = torch.tensor([[0, 1], [1, 0]], dtype=int)
# Fixed observation sequences used as test inputs below.
data_0 = torch.tensor([1.0, 1, 0])
data_1 = torch.tensor([1, 1.0, 0, 1, 0, 0])
data_2 = torch.tensor([1.0, 1, 1])
data_3 = torch.tensor([0.0, 1, 1])
@pytest.mark.parametrize('data,hmm', [
    (data_0, my_hmm),
    (data_1, my_hmm),
    (data_0, pyro_hmm),
    (data_2, my_hmm),
    (data_3, my_hmm),
])
def test_posterior_init(data, hmm):
    """With identity permutations only, the policy's final posterior over the
    initial state must match the HMM's own posterior_log_initial_state_dist.
    """
    # NOTE(review): the policy is always built from ``my_hmm`` even though the
    # reference posterior comes from the parametrized ``hmm``; this only works
    # because both fixtures share identical parameters -- confirm intent.
    perm_policy = MinEntPolicy(possible_perms, my_hmm)
    perm_policy.reset(save_history=True)
    # One identity permutation per time step.
    idperms = possible_perms[torch.zeros(data.shape[0], dtype=int)]
    penultimates = perm_policy.penultimates_from_sequence(data, idperms)
    plisd = penultimates[-1].logsumexp(-1)
    spinit = hmm.posterior_log_initial_state_dist(data)
    assert plisd.allclose(spinit)
@pytest.mark.parametrize('hmm', [
    my_hmm,
])
def test_data_stack(hmm):
    """log_prob over a batch of sequences must equal per-sequence log_probs."""
    # All 2**6 binary strings of length 6, as a float tensor.
    all_data = torch.tensor([list(map(int, ("{{0:0{}b}}".format(6)).format(j))) for j in range(2 ** 6)], dtype=torch.float)
    slp = hmm.log_prob(all_data)
    sllp = torch.tensor([hmm.log_prob(data) for data in all_data])
    assert slp.allclose(sllp)
def test_no_perms():
    """Stepwise belief updates under the identity permutation, checked
    against hand-computed marginals/conditionals for data_0 = [1, 1, 0]."""
    data = data_0
    perm_policy = MinEntPolicy(possible_perms, my_hmm)
    perm_policy.reset(save_history=True)
    data_idx = perm_policy.data_to_idx(data)
    trivial_perm = torch.arange(perm_policy.hmm.initial_logits.shape[0])
    trivial_perm_idx = perm_idxs_from_perms(possible_perms, trivial_perm)
    # Expected posterior over the initial state after each observation.
    hand_calc_inits = [
        torch.tensor([1 / 3, 2 / 3]),
        torch.tensor([1 / 4, 3 / 4]),
        torch.tensor([1 / 3, 2 / 3]),
    ]
    # Expected marginal over the current state after each observation.
    hand_calc_currents = [
        torch.tensor([2 / 3, 1 / 3]),
        torch.tensor([3 / 4, 1 / 4]),
        torch.tensor([1, ZERO]),
    ]
    # Expected current-state distribution conditioned on the initial state.
    hand_calc_current_cond_inits = [
        transition_logits.exp(),
        torch.tensor([[1, ZERO], [2 / 3, 1 / 3]]),
        torch.tensor([[1, ZERO], [1, ZERO]]),
    ]
    for step, i, c, cci in zip(range(3), hand_calc_inits, hand_calc_currents, hand_calc_current_cond_inits):
        # Condition the belief tree on the observation, then extend one step.
        perm_policy.tree.prune_tree(data_idx[step])
        perm_policy.tree.grow()
        state = perm_policy.tree.beliefs[-2]
        current = state.logits.logsumexp(-2)[trivial_perm_idx, 0, :]
        assert current.exp().allclose(c, atol=1e-7)
        init = state.logits.logsumexp(-1)[trivial_perm_idx, 0, :]
        assert init.exp().allclose(i, atol=1e-7)
        cur_cond_init = state.logits - state.logits.logsumexp(-1, keepdim=True)
        cur_cond_init = cur_cond_init[trivial_perm_idx, 0, :, :]
        assert cur_cond_init.exp().allclose(cci, atol=1e-7)
        # Commit to the identity permutation before the next observation.
        perm_policy.tree.prune_tree(trivial_perm_idx)
def test_with_perm():
    """Same stepwise check as test_no_perms, but committing to a fixed
    non-trivial permutation sequence (swap first, then identity twice)."""
    perms = torch.tensor([[1, 0], [0, 1], [0, 1]], dtype=int)
    perm_idxs = perm_idxs_from_perms(possible_perms, perms)
    perm_policy = MinEntPolicy(possible_perms, my_hmm, save_history=True)
    data = data_0
    data_idxs = perm_policy.data_to_idx(data)
    # Hand-computed posteriors over the initial state per step.
    hand_calc_inits = [
        torch.tensor([1 / 3, 2 / 3]),
        torch.tensor([3 / 7, 4 / 7]),
        torch.tensor([1 / 3, 2 / 3]),
    ]
    # Hand-computed current-state marginals per step.
    hand_calc_currents = [
        torch.tensor([5 / 6, 1 / 6]),
        torch.tensor([6 / 7, 1 / 7]),
        torch.tensor([1 - ZERO, ZERO]),
    ]
    # Hand-computed current-given-initial conditionals per step.
    hand_calc_cur_cond_inits = [
        transition_logits[perms[0]].exp(),
        torch.tensor([[2 / 3, 1 / 3], [1 - ZERO, ZERO]]),
        torch.tensor([[1 - ZERO, ZERO], [1 - ZERO, ZERO]]),
    ]
    for step, i, c, cci in zip(range(3), hand_calc_inits, hand_calc_currents, hand_calc_cur_cond_inits):
        data_idx = data_idxs[step]
        # Condition on the observation, grow the tree, then inspect beliefs
        # for this step's chosen permutation.
        perm_policy.tree.prune_tree(data_idx)
        perm_policy.tree.grow()
        state = perm_policy.tree.beliefs[-2]
        perm_idx = perm_idxs[step]
        current = state.logits.logsumexp(-2)[perm_idx, 0, :]
        assert current.exp().allclose(c, atol=1e-7)
        init = state.logits.logsumexp(-1)[perm_idx, 0, :]
        assert init.exp().allclose(i, atol=1e-7)
        cur_cond_init = state.logits - state.logits.logsumexp(-1, keepdim=True)
        cur_cond_init = cur_cond_init[perm_idx, 0, :, :]
        assert cur_cond_init.exp().allclose(cci, atol=1e-7)
        # Commit to this step's permutation before the next observation.
        perm_policy.tree.prune_tree(perm_idx)
def test_with_all_vals():
    """Check belief-tree leaves for *all* possible next observations, then
    after committing to a permutation and conditioning on the next datum."""
    perms = torch.tensor([[0, 1], [1, 0], [0, 1]], dtype=int)
    perm_idxs = perm_idxs_from_perms(possible_perms, perms)
    perm_policy = MinEntPolicy(possible_perms, my_hmm, save_history=True)
    data = data_0
    data_idxs = perm_policy.data_to_idx(data)
    perm_policy.tree.prune_tree(data_idxs[0])
    perm_policy.tree.grow()
    # Posterior initial-state dist for each hypothetical next observation,
    # under the identity permutation (hand-computed values).
    plid = perm_policy.tree.beliefs[-1].logits.logsumexp(-1)[perm_idxs[0], :, 0, :]
    t1 = plid.exp()
    t2 = torch.tensor([[1/2, 1/2], [1/4, 3/4]])
    assert t1.allclose(t2, atol=3e-07)
    # Commit to the identity permutation, condition on the second datum.
    perm_policy.tree.prune_tree(perm_idxs[0])
    perm_policy.tree.prune_tree(data_idxs[1])
    perm_policy.tree.grow()
    state = perm_policy.tree.beliefs[-2].logits[perm_idxs[1], 0, :, :]
    cur_cond_init = state - state.logsumexp(-1, keepdim=True)
    prior_log_current = state.logsumexp(-2)
    assert prior_log_current.exp().allclose(torch.tensor(
        [3/4, 1/4]
    ), atol=3e-07)
    assert cur_cond_init.exp().allclose(torch.tensor(
        [[1/2, 1/2],
         [5/6, 1/6]]
    ), atol=3e-07)
def test_entropies():
    """Expected posterior entropies from the policy tree must match
    hand-computed binary entropies after the first observation."""
    # perms = torch.tensor([[0, 1], [0, 1], [1, 0], [0, 1]], dtype=int)
    # perm_idxs = perm_idxs_from_perms(possible_perms, perms)
    perm_policy = MinEntPolicy(possible_perms, my_hmm, save_history=True)
    data = data_0
    data_idxs = perm_policy.data_to_idx(data)
    perm_policy.reset(save_history=False)
    # ent = perm_policy.expected_entropy()
    # should_ent = 3/4*bin_ent(torch.tensor([2/3]).log())
    # assertTrue(ent[0].allclose(should_ent))
    perm_policy.tree.prune_tree(data_idxs[0])
    perm_policy.tree.grow()
    perm_choice, ent = perm_policy.tree.perm_idxs_from_log_cost(perm_policy.log_cost_func, return_log_costs=True)
    # Hand-computed expected entropy for the swap permutation (index 1)...
    should_ent = 5/12*bin_ent(torch.tensor([1/5]).log()) + 7/12*bin_ent(torch.tensor([3/7]).log())
    assert ent[-2][1, 0].exp().allclose(should_ent)
    # ...and for the identity permutation (index 0).
    should_ent = 1/3*bin_ent(torch.tensor([1/2]).log()) + 2/3*bin_ent(torch.tensor([1/4]).log())
    assert ent[-2][0, 0].exp().allclose(should_ent)
def test_reset():
    """After full prune/grow cycles over a data sequence, reset() must
    restore the belief tree to its freshly-constructed state."""
    data = data_0
    perm_policy = MinEntPolicy(possible_perms, my_hmm, save_history=True)
    # Snapshot of the pristine policy to compare against after reset().
    copy_policy = deepcopy(perm_policy)
    num_states = initial_logits.shape[0]
    idperm = torch.arange(0, num_states, dtype=int)
    for datum in data:
        # Condition on the datum, grow, then commit to the identity perm.
        perm_policy.tree.prune_tree(perm_policy.data_to_idx(datum))
        perm_policy.tree.grow()
        perm_policy.tree.prune_tree(perm_idxs_from_perms(possible_perms, idperm))
    perm_policy.reset()
    assert all([torch.all(belief.logits.eq(copy_belief.logits)) for belief, copy_belief in zip(perm_policy.tree.beliefs, copy_policy.tree.beliefs)])
| 8,690 | 38.148649 | 148 | py |
perm_hmm | perm_hmm-master/tests/skip_first_tests.py | import numpy as np
import torch
import pyro.distributions as dist
from perm_hmm.models.hmms import SkipFirstDiscreteHMM
from perm_hmm.util import num_to_data, all_strings
def state_sequence_lp(seq, il, tl):
    """Log-probability of the state path ``seq`` under initial log-dist
    ``il`` and (time-homogeneous) transition log-matrix ``tl``."""
    start_lp = il[seq[0]]
    # Gather the log-probability of every consecutive transition and sum.
    step_lps = tl[seq[:-1], seq[1:]]
    return start_lp + step_lps.sum(-1)
def log_joint_at_seq(data, il, tl, od, seq):
    """Log joint probability of observations ``data`` and state path ``seq``.

    ``seq`` carries one extra leading (initial) state relative to ``data``,
    hence the emissions are read at ``seq[1:]``.
    """
    num_obs = len(data)
    path_lp = state_sequence_lp(seq, il, tl)
    emit_lps = od.log_prob(data[:, None])[torch.arange(num_obs), seq[1:]]
    return path_lp + emit_lps.sum(-1)
def brute_force_skip_first_lp(data, il, tl, od):
    """Marginal log-likelihood of ``data`` by exhaustive log-sum-exp over
    all state paths of length ``len(data) + 1`` (initial state included)."""
    num_states = len(il)
    total = -float('inf')
    for path in all_strings(len(data) + 1, base=num_states, dtype=int):
        total = np.logaddexp(total, log_joint_at_seq(data, il, tl, od, path).numpy())
    return total
# NOTE(review): "jog_joint" is presumably a typo for "log_joint"; renaming
# would require touching the call sites below, so it is only flagged here.
def brute_force_skip_first_jog_joint(data, il, tl, od, i):
    """Log joint probability of ``data`` and initial state ``i``, by
    exhaustive log-sum-exp over all continuations of the state path."""
    n = len(data)
    retval = -float('inf')
    nstates = len(il)
    # Prepend the fixed initial state ``i`` to every length-n continuation.
    for seq in all_strings(n, base=nstates, dtype=int):
        seq = torch.cat((torch.tensor([i]), seq))
        retval = np.logaddexp(retval, log_joint_at_seq(data, il, tl, od, seq).numpy())
    return retval
def test_skipfirst_logprob():
    """SkipFirstDiscreteHMM.log_prob on a random binary string must match
    the brute-force sum over all state paths."""
    n = 3
    tmax = 5
    dirichlet = dist.Dirichlet(torch.ones(n) / n)
    initial_logits = (torch.ones(n) / n).log()
    transition_logits = dirichlet.sample((n,)).log()
    observation_dist = dist.Bernoulli(torch.rand(n))
    sfhmm = SkipFirstDiscreteHMM(initial_logits, transition_logits, observation_dist)
    # Pick one of the 2**tmax binary observation strings at random.
    i = torch.randint(2**tmax, (1,))
    data = num_to_data(i, tmax)
    sflp = sfhmm.log_prob(data)
    bfsflp = brute_force_skip_first_lp(data, initial_logits, transition_logits, observation_dist)
    assert sflp.isclose(torch.tensor(bfsflp))
def test_total_skipfirst_logprob():
    """Probabilities over all length-5 binary strings must sum to one
    (log-sum-exp of the batch log_prob must be ~0)."""
    n = 3
    tmax = 5
    dirichlet = dist.Dirichlet(torch.ones(n) / n)
    initial_logits = (torch.ones(n) / n).log()
    transition_logits = dirichlet.sample((n,)).log()
    observation_dist = dist.Bernoulli(torch.rand(n))
    sfhmm = SkipFirstDiscreteHMM(initial_logits, transition_logits, observation_dist)
    data = all_strings(5)
    assert torch.isclose(sfhmm.log_prob(data).logsumexp(-1), torch.tensor(0.).double(), atol=5e-7)
def test_skipfirst_plisd():
    """posterior_log_initial_state_dist on one random string must equal the
    brute-force joint log-prob minus the marginal log-likelihood."""
    n = 3
    tmax = 5
    dirichlet = dist.Dirichlet(torch.ones(n) / n)
    initial_logits = (torch.ones(n) / n).log()
    transition_logits = dirichlet.sample((n,)).log()
    observation_dist = dist.Bernoulli(torch.rand(n))
    sfhmm = SkipFirstDiscreteHMM(initial_logits, transition_logits, observation_dist)
    i = torch.randint(2**tmax, (1,))
    data = num_to_data(i, tmax)
    sfplisd = sfhmm.posterior_log_initial_state_dist(data)
    # Bayes: log P(init | data) = log P(data, init) - log P(data).
    bfsfplisd = torch.tensor([brute_force_skip_first_jog_joint(data, initial_logits, transition_logits, observation_dist, i) for i in range(n)]) - sfhmm.log_prob(data)
    assert sfplisd.allclose(bfsfplisd.float())
def test_skipfirst_plisd_all():
    """posterior_log_initial_state_dist over the full batch of binary strings
    must match the brute-force joint minus the marginal likelihood."""
    n = 3
    tmax = 5
    dirichlet = dist.Dirichlet(torch.ones(n) / n)
    initial_logits = (torch.ones(n) / n).log()
    transition_logits = dirichlet.sample((n,)).log()
    observation_dist = dist.Bernoulli(torch.rand(n))
    sfhmm = SkipFirstDiscreteHMM(initial_logits, transition_logits, observation_dist)
    # (An unused ``i = torch.randint(...)`` draw was removed here; nothing
    # after this point samples from the RNG, so behavior is unchanged.)
    data = all_strings(tmax)
    sfplisd = sfhmm.posterior_log_initial_state_dist(data)
    # Brute-force joint log-prob P(data, initial state) for every (string, state).
    bfsfplisd = torch.tensor([
        [brute_force_skip_first_jog_joint(dat, initial_logits, transition_logits, observation_dist, i) for i in range(n)]
        for dat in data
    ])
    # Bayes: subtract the marginal likelihood of each string.
    bfsfplisd = bfsfplisd - sfhmm.log_prob(data)[:, None]
    assert sfplisd.allclose(bfsfplisd.float())
| 3,666 | 35.67 | 167 | py |
perm_hmm | perm_hmm-master/tests/test_exhaustive.py | import pytest
from operator import mul
from functools import reduce
import numpy as np
from scipy.special import logsumexp
import matplotlib.pyplot as plt
import torch
import pyro.distributions as dist
import adapt_hypo_test.two_states.util as twotil
from perm_hmm.models.hmms import PermutedDiscreteHMM, random_phmm
from perm_hmm.util import id_and_transpositions, all_strings, log1mexp
from perm_hmm.policies.ignore_transitions import IgnoreTransitions
from perm_hmm.policies.exhaustive import ExhaustivePolicy
from perm_hmm.simulator import HMMSimulator
from perm_hmm.policies.belief_tree import HMMBeliefTree
ZERO = 1e-100
@pytest.mark.parametrize("p,q,steps", [(0.1, 0.3, 8), (0.1, 0.2, 8), (0.01, 0.8, 8)])
def test_exhaustive_value(p, q, steps):
    """Cross-check the exhaustive policy's misclassification rate against the
    transition-free adaptive solver (IgnoreTransitions) and a direct
    simulation, for a two-state HMM with identity transitions and emission
    flip probabilities ``p`` and ``q``.

    NOTE(review): this test calls ``plt.show()`` twice, which blocks until
    the plot windows are closed -- confirm whether the plots are meant to
    stay in an automated run.
    """
    print("p = {}, q = {}".format(p, q))
    il = (torch.ones(2)/2).log()
    observation_dist = dist.Bernoulli(torch.tensor([p, 1-q]))
    # Identity transition matrix (up to the numerical-zero regularizer).
    transition_logits = torch.tensor([[1, ZERO], [ZERO, 1]]).log()
    hmm = PermutedDiscreteHMM(il, transition_logits, observation_dist)
    possible_perms = torch.tensor([[0, 1], [1, 0]])
    es = ExhaustivePolicy(possible_perms, hmm, steps)
    costs = es.compute_perm_tree(return_log_costs=True, delete_belief_tree=False)
    # Success probability implied by the root cost of the exhaustive tree.
    vvv = log1mexp(costs[0].ravel()).exp().numpy().item()
    sim = HMMSimulator(hmm)
    ep, ed = sim.all_classifications(steps, perm_policy=es, verbosity=1)
    eperms = ed[b'perms'].numpy()
    ve = ep.log_misclassification_rate().exp().numpy()
    # Reference: the solver that ignores transitions entirely.
    it = IgnoreTransitions(possible_perms, p, q, 0, 1)
    v = it.solve(steps)
    ip, idict = sim.all_classifications(steps, perm_policy=it, verbosity=2)
    r = twotil.m_to_r(twotil.pq_to_m(p, q))
    istories = twotil.nx_to_log_odds(idict[b'history']['x'].numpy(), r)
    plt.plot(istories.transpose())
    plt.show()
    # Reconstruct the belief trajectory for every observation string by
    # selecting, at each depth, the branch matching the chosen permutations.
    beliefs = []
    for i in range(steps):
        lps = es.belief_tree.beliefs[-1-2*i].logits
        perms = ed[b'perms'][torch.arange(2**(steps-i))*2**i]
        perms = perms[:, :steps-i]
        b = select_perm_path(lps, es.possible_perms, perms)
        for j in range(i):
            b = b.unsqueeze(-3)
        b = torch.tile(b, (1,)*(len(b.shape) - 3) + (2, 1, 1))
        b = b.reshape((-1, 2, 2))
        beliefs.append(b)
    beliefs = beliefs[::-1]
    # Log-odds of state 1 vs state 0 along each trajectory, for plotting.
    odds = []
    for b in beliefs:
        b = b.logsumexp(-1).numpy()
        odds.append(b[..., 1] - b[..., 0])
    odds = np.stack(odds)
    plt.plot(odds)
    plt.show()
    # Every leaf belief must appear in the analytic grid of reachable beliefs.
    possible_lps = twotil.lp_grid(steps, twotil.m_to_r(twotil.pq_to_m(p, q)))
    assert np.all(np.any(np.all(np.isclose(np.exp(possible_lps.reshape(2, -1)[:, None, :]), np.exp(es.belief_tree.beliefs[-1].logits.logsumexp(-1).squeeze(-2).reshape((-1, 2)).transpose(0, 1).numpy()[:, :, None]), atol=1e-7), axis=0), axis=-1))
    iperms = idict[b'perms'].numpy()
    # assert np.all(eperms == iperms)
    # Convert the solver's per-state value into an overall error rate.
    v = np.exp(twotil.log1mexp((logsumexp(v.ravel()) - np.log(2)).item()))
    print ("vvv = {}".format(vvv))
    print("ve = {}".format(ve))
    print("v = {}".format(v))
    b = select_perm_path(es.belief_tree.beliefs[-1].logits, es.possible_perms, ed[b'perms'])
    s = all_strings(steps)
    log_min_entropy = (-(b.logsumexp(-1).max(-1)[0])).log()
    lps = hmm.log_prob(s, ed[b'perms'])
    rrrate = log1mexp((lps + (- (log_min_entropy.exp()))).logsumexp(-1)).exp().numpy()
    # All four routes to the misclassification rate must agree.
    assert np.allclose(rrrate, v, atol=1e-7)
    assert np.allclose(ve, v, atol=1e-7)
    assert np.allclose(vvv, v, atol=5e-7)
def indices_of(perm, possible_perms):
    """Return the row index of each ``perm`` within ``possible_perms``.

    Asserts that every queried permutation occurs exactly once.
    """
    hits = torch.eq(perm.unsqueeze(-2), possible_perms).all(dim=-1)
    assert torch.all(hits.sum(dim=-1) == 1)
    return torch.argmax(hits.long(), dim=-1)
def select_perm_path(lps, possible_perms, perms):
    """Select from the exhaustive belief tensor ``lps`` the single belief per
    observation string that corresponds to the permutation path ``perms``.

    ``lps`` has alternating leading axes -- presumably (observation,
    permutation) pairs, one per time step (TODO confirm against
    HMMBeliefTree); the observation axes are moved to the front and
    flattened into one batch axis, then one permutation axis is consumed
    per step by indexing with the chosen permutation.
    """
    height = len(lps.shape) - 3
    # Interleave: even-positioned axes to the front, odd-positioned axes
    # to the back, preserving relative order within each group.
    retval = torch.moveaxis(
        lps,
        tuple(range(height)),
        tuple((i // 2) if i % 2 == 0 else (i // 2 + height // 2 + 1) for i in
              range(height))
    )
    # Flatten the front group of axes into a single batch dimension.
    retval = torch.reshape(retval, (
        reduce(mul, retval.shape[:height // 2+1], 1),) + retval.shape[height // 2+1:])
    # Consume one permutation axis per step, following the chosen path.
    for i in range(perms.shape[-2]-1):
        perm = perms[..., i, :]
        pidx = indices_of(perm, possible_perms)
        retval = retval[torch.arange(retval.shape[0]), pidx]
    return retval.squeeze(-3)
@pytest.mark.parametrize("possible_perms,hmm,steps", [
    (id_and_transpositions(3), random_phmm(3), 4),
])
def test_exhaustive_value_brute(possible_perms, hmm, steps):
    """The exhaustive policy's belief tree must agree with the simulator's
    posteriors, classifications, and misclassification rate."""
    es = ExhaustivePolicy(possible_perms, hmm, steps)
    es.compute_perm_tree(delete_belief_tree=False)
    sim = HMMSimulator(hmm)
    ep, d = sim.all_classifications(steps, perm_policy=es, verbosity=1)
    # Leaf beliefs, selected along the permutations actually chosen.
    final_belief = select_perm_path(es.belief_tree.beliefs[-1].logits, es.possible_perms, d[b'perms'])
    assert torch.allclose(final_belief.logsumexp(-1).exp(), d[b'posterior_log_initial_state_dist'].exp(), atol=1e-7)
    # MAP classification from the belief must match the simulator's.
    assert torch.all(final_belief.logsumexp(-1).argmax(-1) == ep.classifications)
    brute_force_rate = ep.log_misclassification_rate()
    s = all_strings(steps)
    assert torch.allclose(ep.log_joint.logsumexp(-2), hmm.log_prob(s, d[b'perms']))
    # Four equivalent expressions for the misclassification rate, all of
    # which must agree after exponentiation.
    after_rate = log1mexp((ep.log_joint.logsumexp(-2) + final_belief.logsumexp(-1).max(-1)[0]).logsumexp(-1))
    log_min_entropy = (-(final_belief.logsumexp(-1).max(-1)[0])).log()
    rrrate = log1mexp((hmm.log_prob(s, d[b'perms']) + d[b'posterior_log_initial_state_dist'].max(-1)[0]).logsumexp(-1))
    lps = hmm.log_prob(s, d[b'perms'])
    rrate = log1mexp((lps + (- (log_min_entropy.exp()))).logsumexp(-1))
    assert torch.allclose(after_rate.exp(), rrrate.exp())
    assert torch.allclose(brute_force_rate.exp(), rrrate.exp())
    assert torch.allclose(rrate.exp(), rrrate.exp())
| 5,647 | 43.472441 | 244 | py |
perm_hmm | perm_hmm-master/tests/confusion_matrix_test.py | import unittest
import torch
import torch.distributions as dist
from perm_hmm.postprocessing import EmpiricalPostprocessor, ExactPostprocessor
from perm_hmm.util import ZERO
class MyTestCase(unittest.TestCase):
    """Checks EmpiricalPostprocessor and ExactPostprocessor against direct
    counting / direct log-space computation on random classifications."""

    def setUp(self) -> None:
        self.num_states = 10
        self.testing_states = torch.tensor([0, 3, 4], dtype=int)
        self.num_runs = 1000
        # Uniform random classifications/ground truths over the tested states.
        self.classifications = self.testing_states[torch.randint(len(self.testing_states), (self.num_runs,))]
        self.ground_truth = self.testing_states[torch.randint(len(self.testing_states), (self.num_runs,))]
        self.empirical_postprocessor = EmpiricalPostprocessor(self.ground_truth, self.classifications)
        # Random joint log-distribution over (state, run): clamp entries away
        # from log(0), zero out states not under test, renormalize.
        n = self.num_runs*self.num_states
        fake_joint = dist.Dirichlet(torch.full((n,), 1./n)).sample().log()
        fake_joint = fake_joint.reshape((self.num_states, self.num_runs))
        log_zero = torch.tensor(ZERO).log()
        fake_joint[fake_joint < log_zero] = log_zero
        not_states = torch.tensor(list(set(range(self.num_states)).difference(set(self.testing_states.tolist()))))
        fake_joint[not_states] = torch.tensor(ZERO).log()
        fake_joint -= fake_joint.logsumexp(-1).logsumexp(-1)
        self.initial_logits = fake_joint.logsumexp(-1)
        # Resample until every tested state has non-negligible prior mass.
        while (self.initial_logits[self.testing_states].exp() < 1e-6).any():
            fake_joint = dist.Dirichlet(torch.full((n,), 1. / n)).sample().log()
            fake_joint = fake_joint.reshape((self.num_states, self.num_runs))
            log_zero = torch.tensor(ZERO).log()
            fake_joint[fake_joint < log_zero] = log_zero
            not_states = torch.tensor(list(
                set(range(self.num_states)).difference(
                    set(self.testing_states.tolist()))))
            fake_joint[not_states] = torch.tensor(ZERO).log()
            fake_joint -= fake_joint.logsumexp(-1).logsumexp(-1)
            self.initial_logits = fake_joint.logsumexp(-1)
        self.restricted_classifications = self.testing_states[torch.randint(len(self.testing_states), (self.num_runs,))]
        self.exact_postprocessor = ExactPostprocessor(fake_joint, self.restricted_classifications)

    def test_confusion_matrix(self):
        """Empirical confusion-matrix entries must equal directly-counted
        frequencies; the average rate must equal the mismatch fraction."""
        rate_dict = self.empirical_postprocessor.misclassification_rate(.95)
        avg_rate = rate_dict[b"rate"]
        avg_int = torch.tensor([rate_dict[b"lower"], rate_dict[b"upper"]])
        conf_dict = self.empirical_postprocessor.confusion_matrix(.95)
        all_rates = conf_dict[b'matrix']
        all_ints = torch.stack([conf_dict[b'lower'], conf_dict[b'upper']])
        self.assertTrue(torch.all(all_rates[~torch.isnan(all_rates)] <= 1))
        # Entry (i, j) must equal #(truth==i and classified==j) / #(truth==i).
        for i in range(len(self.testing_states)):
            for j in range(len(self.testing_states)):
                total_i = (self.ground_truth == self.testing_states[i]).sum().float()
                total_ij = (self.classifications[self.ground_truth == self.testing_states[i]] == self.testing_states[j]).sum()
                frequency = total_ij/total_i
                self.assertTrue(frequency.isclose(all_rates[self.testing_states[i], self.testing_states[j]]))
        # Average rate: mismatch fraction among runs whose truth is tested.
        mask = torch.zeros_like(self.ground_truth, dtype=bool)
        for state in self.testing_states:
            mask = mask | (state == self.ground_truth)
        self.assertTrue(((~(self.classifications[mask] == self.ground_truth[mask])).sum() / float(mask.sum())).isclose(avg_rate))
        # Commented-out confidence-interval coverage experiment retained
        # from the original for reference.
        # cov_num_exp = 5000
        # cov_truth = torch.randint(self.num_states, (cov_num_exp, self.num_runs))
        # cov_classifications = torch.randint(self.num_states, (cov_num_exp, self.num_runs))
        # cov_postprocessor = EmpiricalPostprocessor(cov_truth, cov_classifications)
        # results = cov_postprocessor.misclassification_rate(.95)
        # avg_coverage = ((results[b"rate"] > avg_int[0]) &
        #                 (results[b"rate"] < avg_int[1])).sum(0) / float(cov_num_exp)
        # print(avg_coverage)
        # all_coverage = ((results[b"rate"] > all_ints[0]) &
        #                 (results[b"rate"] < all_ints[1])).sum(0) / float(cov_num_exp)
        # print(all_coverage)

    def test_exact_post(self):
        """Exact misclassification rate must equal the prior-weighted
        off-diagonal mass of the exact confusion matrix."""
        log_average_rate = self.exact_postprocessor.log_misclassification_rate()
        log_confusion_matrix = self.exact_postprocessor.log_confusion_matrix()
        confusion_rates = log_confusion_matrix.exp()
        average_rate = log_average_rate.exp()
        self.assertTrue(torch.all(confusion_rates[~torch.isnan(confusion_rates)] <= 1))
        log_prior = self.exact_postprocessor.log_joint.logsumexp(-1)
        valid_matrix = log_confusion_matrix[torch.meshgrid(self.testing_states, self.testing_states)]
        valid_prior = log_prior[self.testing_states].unsqueeze(-1)
        # Sum prior-weighted confusion over off-diagonal (misclassified) pairs.
        test_log_rate = (valid_matrix + valid_prior)[~torch.eye(len(self.testing_states), dtype=bool)].logsumexp(-1)
        test_rate = test_log_rate.exp()
        self.assertTrue(test_rate.isclose(average_rate, atol=1e-4))


if __name__ == '__main__':
    unittest.main()
| 5,022 | 58.094118 | 129 | py |
perm_hmm | perm_hmm-master/tests/bernoulli_tests.py | import unittest
import torch
import pyro.distributions as dist
from perm_hmm.classifiers.interrupted import IIDInterruptedClassifier
from perm_hmm.models.hmms import DiscreteHMM, PermutedDiscreteHMM
from perm_hmm.simulator import HMMSimulator
from perm_hmm.util import transpositions, num_to_data
from perm_hmm.policies.min_tree import MinEntPolicy
from perm_hmm.training.interrupted_training import exact_train_ic, train_ic
from perm_hmm.postprocessing import ExactPostprocessor, EmpiricalPostprocessor
class MyTestCase(unittest.TestCase):
    """End-to-end comparison of the interrupted classifier against the
    permuted-HMM simulator, both empirically and exactly."""

    def setUp(self) -> None:
        self.num_states = 5
        dir = dist.Dirichlet(torch.ones(self.num_states)/self.num_states)
        self.observation_dist = dist.Bernoulli(torch.rand((self.num_states,)))
        self.transition_logits = dir.sample((self.num_states,)).log()
        self.initial_logits = dir.sample().log()
        self.num_testing_states = 3
        # Resample until every tested state carries at least 10% prior mass.
        self.testing_states = torch.multinomial(dir.sample(), self.num_testing_states)
        while (self.initial_logits.exp()[self.testing_states] < .1).any():
            self.testing_states = torch.multinomial(dir.sample(), self.num_testing_states)
            self.initial_logits = dir.sample().log()
        # Candidate permutations: identity plus all transpositions.
        self.possible_perms = \
            torch.stack(
                [torch.arange(self.num_states)] +
                transpositions(self.num_states)
            )
        self.num_steps = 6
        self.hmm = DiscreteHMM(
            self.initial_logits,
            self.transition_logits,
            self.observation_dist,
        )
        self.bhmm = PermutedDiscreteHMM.from_hmm(self.hmm)
        self.perm_policy = MinEntPolicy(
            self.possible_perms,
            self.bhmm,
            save_history=True,
        )
        self.ic = IIDInterruptedClassifier(self.observation_dist, torch.tensor(1.))
        self.bs = HMMSimulator(
            self.bhmm,
        )

    def test_bernoulli(self):
        """Train the interrupted classifier, then print confusion matrices /
        misclassification rates for it, the unpermuted run, and the
        minimum-entropy permuted run -- first empirically, then exactly.
        (Smoke test: results are printed, not asserted.)"""
        num_samples = 2000
        num_train = 1000
        # NOTE(review): ``x`` is indexed with ``x[..., 0]`` as the per-run
        # ground-truth label -- presumably the states component of the
        # sample; confirm the unpack order of ``DiscreteHMM.sample``.
        x, y = self.hmm.sample((num_train, self.num_steps))
        _ = train_ic(self.ic, y, x[..., 0], self.initial_logits.shape[-1])
        iep = self.bs.simulate(self.num_steps, num_samples)
        x, training_data = self.bhmm.sample((num_train, self.num_steps))
        _ = train_ic(self.ic, training_data, x[..., 0],
                     len(self.bhmm.initial_logits))
        pp = self.bs.simulate(self.num_steps, num_samples, perm_policy=self.perm_policy)
        nop, d = self.bs.simulate(self.num_steps, num_samples, verbosity=1)
        i_classifications = self.ic.classify(d[b"data"], verbosity=0)
        ip = EmpiricalPostprocessor(nop.ground_truth, i_classifications)
        print(ip.confusion_matrix())
        print(nop.confusion_matrix())
        print(pp.confusion_matrix())
        # Exact versions: enumerate every possible observation string.
        base = len(self.bhmm.observation_dist.enumerate_support())
        data = torch.stack(
            [num_to_data(num, self.num_steps, base) for num in
             range(base ** self.num_steps)]
        ).float()
        lp = self.bhmm.log_prob(data)
        plisd = self.bhmm.posterior_log_initial_state_dist(data)
        log_joint = plisd.T + lp
        _ = exact_train_ic(self.ic, data, log_joint)
        nop = self.bs.all_classifications(self.num_steps)
        pp = self.bs.all_classifications(self.num_steps, perm_policy=self.perm_policy)
        ic_classifications = self.ic.classify(data)
        ip = ExactPostprocessor(log_joint, ic_classifications)
        print(ip.log_misclassification_rate())
        print(nop.log_misclassification_rate())
        print(pp.log_misclassification_rate())


if __name__ == '__main__':
    unittest.main()
| 3,643 | 41.870588 | 90 | py |
perm_hmm | perm_hmm-master/tests/interrupted_tests.py | import unittest
import torch
import pyro.distributions as dist
from perm_hmm.classifiers.interrupted import IIDInterruptedClassifier, IIDBinaryIntClassifier
from perm_hmm.models.hmms import DiscreteHMM, PermutedDiscreteHMM
from perm_hmm.postprocessing import ExactPostprocessor, EmpiricalPostprocessor
import perm_hmm.training.interrupted_training
from perm_hmm.util import transpositions, num_to_data, ZERO
from perm_hmm.policies.min_tree import MinEntPolicy
class MyTestCase(unittest.TestCase):
    """Tests for the interrupted classifiers: training/classification on a
    restricted state set, and agreement between the n-ary and binary
    interrupted classifiers."""

    def setUp(self) -> None:
        self.num_states = 5
        dir = dist.Dirichlet(torch.ones(self.num_states)/self.num_states)
        self.observation_probs = torch.rand((self.num_states,))
        self.observation_dist = dist.Bernoulli(self.observation_probs)
        self.transition_logits = dir.sample((self.num_states,)).log()
        self.num_testing_states = 3
        # Draw distinct testing states (resample on collision).
        self.testing_states = torch.randint(self.num_states, (self.num_testing_states,))
        while len(self.testing_states.unique()) != len(self.testing_states):
            self.testing_states = torch.randint(self.num_states, (self.num_testing_states,))
        # Prior supported (up to the numerical zero) only on testing states.
        dir = dist.Dirichlet(torch.ones(self.num_testing_states)/self.num_testing_states)
        not_states = torch.tensor(list(set(range(self.num_states)).difference(set(self.testing_states.tolist()))))
        il = dir.sample()
        self.initial_logits = torch.empty(self.num_states)
        self.initial_logits[self.testing_states] = il
        self.initial_logits[not_states] = ZERO
        self.initial_logits = self.initial_logits.log()
        # Candidate permutations: identity plus all transpositions.
        self.possible_perms = \
            torch.stack(
                [torch.arange(self.num_states)] +
                transpositions(self.num_states)
            )
        self.hmm = DiscreteHMM(
            self.initial_logits,
            self.transition_logits,
            self.observation_dist,
        )
        self.bhmm = PermutedDiscreteHMM.from_hmm(self.hmm)
        self.perm_policy = MinEntPolicy(self.possible_perms, self.bhmm, save_history=True)
        self.ic = IIDInterruptedClassifier(dist.Bernoulli(self.observation_probs[self.testing_states]), torch.tensor(1.), testing_states=self.testing_states)

    def test_ic(self):
        """Train and run the interrupted classifier; each confusion-matrix
        row over the testing states must be a probability distribution."""
        num_training_samples = 100
        time_dim = 6
        training_data = self.hmm.sample((num_training_samples, time_dim))
        ground_truth = training_data.states[..., 0]
        _ = perm_hmm.training.interrupted_training.train_ic(self.ic, training_data.observations, ground_truth)
        # Empirical confusion matrix on fresh samples.
        num_testing_samples = 300
        testing_data = self.hmm.sample((num_testing_samples, time_dim))
        i_class = self.ic.classify(testing_data.observations, verbosity=0)
        iep = EmpiricalPostprocessor(
            testing_data.states[..., 0],
            i_class,
        )
        rate = iep.misclassification_rate()
        conf = iep.confusion_matrix(.95)
        print(conf)
        self.assertTrue(conf[b"matrix"][self.testing_states].sum(-1).allclose(torch.tensor(1.)))
        # Exact confusion matrix over all 2**time_dim observation strings.
        all_possible_runs = torch.stack([num_to_data(x, time_dim) for x in range(2**time_dim)])
        plisd = self.hmm.posterior_log_initial_state_dist(all_possible_runs)
        lp = self.hmm.log_prob(all_possible_runs)
        log_joint = plisd.T + lp
        i_class = self.ic.classify(all_possible_runs, verbosity=0)
        iep = ExactPostprocessor(
            log_joint,
            i_class,
        )
        res = iep.log_misclassification_rate()
        conf = iep.log_confusion_matrix()
        self.assertTrue(conf[self.testing_states].logsumexp(-1).allclose(torch.tensor(0.), atol=1e-5))
        print(res)

    def test_consistency(self):
        """On random two-state problems, the generic interrupted classifier
        restricted to two states must agree with the dedicated binary one on
        every possible observation string."""
        num_states = 3
        for i in range(100):
            with self.subTest(i=i):
                dir = dist.Dirichlet(torch.ones(num_states)/num_states)
                observation_probs = torch.rand((num_states,))
                observation_dist = dist.Bernoulli(observation_probs)
                transition_logits = dir.sample((num_states,)).log()
                num_testing_states = 2
                testing_states = torch.randint(num_states, (num_testing_states,))
                while len(testing_states.unique()) != len(testing_states):
                    testing_states = torch.randint(num_states, (num_testing_states,))
                dir = dist.Dirichlet(torch.ones(num_testing_states)/num_testing_states)
                not_states = torch.tensor(list(set(range(num_states)).difference(set(testing_states.tolist()))))
                il = dir.sample()
                initial_logits = torch.empty(num_states)
                initial_logits[testing_states] = il
                initial_logits[not_states] = ZERO
                initial_logits = initial_logits.log()
                hmm = DiscreteHMM(
                    initial_logits,
                    transition_logits,
                    observation_dist,
                )
                ratio = torch.randint(20, ()).float()
                # NOTE(review): the double-bracket index
                # ``observation_probs[[testing_states[:2]]]`` adds a leading
                # dimension compared to ``observation_probs[testing_states[:2]]``
                # -- confirm this is what IIDInterruptedClassifier expects.
                ic = IIDInterruptedClassifier(dist.Bernoulli(observation_probs[[testing_states[:2]]]), ratio, testing_states=testing_states[:2])
                bin_ic = IIDBinaryIntClassifier(dist.Bernoulli(observation_probs[testing_states[1]]), dist.Bernoulli(observation_probs[testing_states[0]]), ratio, ratio, testing_states[1], testing_states[0])
                time_dim = 8
                all_possible_runs = torch.stack([num_to_data(x, time_dim) for x in range(2**time_dim)])
                bin_class = bin_ic.classify(all_possible_runs)
                classifi = ic.classify(all_possible_runs)
                # if not ((bin_class == classifi).all()):
                #     bin_class = bin_ic.classify(all_possible_runs)
                #     classifi = ic.classify(all_possible_runs)
                self.assertTrue((bin_class == classifi).all())


if __name__ == '__main__':
    unittest.main()
| 5,890 | 48.091667 | 207 | py |
perm_hmm | perm_hmm-master/tests/sample_test.py | import unittest
import torch
import numpy as np
import pyro.distributions as dist
from pyro.distributions import DiscreteHMM
from perm_hmm.models.hmms import DiscreteHMM as MyDiscreteHMM
from perm_hmm.models.hmms import PermutedDiscreteHMM
from perm_hmm.util import ZERO, num_to_data
def to_base(x, y, max_length=None):
    """Return the digits of non-negative integer ``x`` in base ``y``,
    most-significant digit first.

    :param x: non-negative integer to convert.
    :param y: base (>= 2).
    :param max_length: if given, left-pad with zeros to this many digits.
    :returns: list of ints, most-significant first.

    Fix: without ``max_length``, the original returned ``[]`` for ``x == 0``;
    zero is now represented as ``[0]``. The padded path is unchanged.
    """
    digits = []
    while x > 0:
        x, r = divmod(x, y)
        digits.append(r)
    if max_length is not None:
        # Pad (in least-significant-first order) before reversing.
        digits += [0] * (max_length - len(digits))
    elif not digits:
        digits = [0]
    return list(reversed(digits))
def joint_lp(states, observations, hmm: MyDiscreteHMM):
    """Log joint probability of state paths ``states`` and the matching
    ``observations`` under ``hmm`` (batched over leading dimensions)."""
    prior_lp = hmm.initial_logits[states[..., 0]]
    transition_lp = hmm.transition_logits[states[..., :-1], states[..., 1:]].sum(-1)
    # Re-instantiate the emission distribution with per-state parameters.
    per_state_dist = type(hmm.observation_dist)(hmm.observation_dist._param[states])
    emission_lp = per_state_dist.log_prob(observations).sum(-1)
    return prior_lp + transition_lp + emission_lp
class MyTestCase(unittest.TestCase):
def setUp(self):
self.num_states = 2
self.observation_probs = torch.tensor([.5, 1])
self.observation_dist = dist.Bernoulli(self.observation_probs)
self.n_outcomes = 2
self.possible_perms = torch.tensor([[0, 1],
[1, 0]], dtype=int)
self.num_perms = len(self.possible_perms)
self.transition_logits = torch.tensor([[1-ZERO, ZERO], [.5, .5]]).log().float()
self.initial_logits = torch.tensor([.5, .5]).log()
self.shmm = MyDiscreteHMM(self.initial_logits,
self.transition_logits,
self.observation_dist)
self.normal_shmm = DiscreteHMM(self.initial_logits, self.transition_logits, self.observation_dist)
self.bhmm = PermutedDiscreteHMM(self.initial_logits,
self.transition_logits,
self.observation_dist)
self.data = torch.tensor([1.0, 1, 0])
self.data_2 = torch.tensor([1.0, 1, 1])
self.data_3 = torch.tensor([0.0, 1, 1])
self.aye = (torch.eye(2) + ZERO).log()
self.aye -= self.aye.logsumexp(-1, keepdim=True)
self.hmm = MyDiscreteHMM(self.initial_logits, self.aye,
self.observation_dist)
self.normal_hmm = DiscreteHMM(self.initial_logits, self.transition_logits, self.observation_dist)
def test_sample(self):
num_states = 5
p = dist.Categorical(torch.eye(5))
il = \
dist.Dirichlet(
torch.ones(num_states, dtype=torch.float)/num_states).sample()
il = il.log()
il -= il.logsumexp(-1)
lm = torch.tensor([
[.5, .5, ZERO, ZERO, ZERO],
[ZERO, .5, .5, ZERO, ZERO],
[ZERO, ZERO, .5, .5, ZERO],
[ZERO, ZERO, ZERO, .5, .5],
[ZERO, ZERO, ZERO, ZERO, 1-ZERO],
]).log()
lm -= lm.logsumexp(-1, keepdims=True)
hmm = MyDiscreteHMM(il, lm, p)
samp = hmm.sample()
self.assertTrue(samp.observations.shape == (1,))
samp = hmm.sample((1,))
self.assertTrue(samp.observations.shape == (1,))
samp = hmm.sample((3,))
self.assertTrue(samp.observations.shape == (3,))
samp = hmm.sample((500, 4, 8))
self.assertTrue(samp.observations.shape == (500, 4, 8))
diffs = np.diff(np.array(samp.states))
self.assertTrue(np.all((diffs == 0) | (diffs == 1)))
self.assertTrue((samp.observations.int() == samp.states).all())
lm0 = torch.tensor([
[ZERO, 1-ZERO, ZERO, ZERO, ZERO],
[ZERO, 1-ZERO, ZERO, ZERO, ZERO],
[ZERO, ZERO, 1-ZERO, ZERO, ZERO],
[ZERO, ZERO, ZERO, 1-ZERO, ZERO],
[ZERO, ZERO, ZERO, ZERO, 1-ZERO],
]).log()
lm0 -= lm0.logsumexp(-1, keepdims=True)
tlm = torch.stack([torch.roll(lm0, (i, i), (0, 1)) for i in range(4)])
hmm2 = MyDiscreteHMM(il, tlm, p)
samp = hmm2.sample()
self.assertTrue(samp.observations.shape == (4,))
samp = hmm2.sample((1,))
self.assertTrue(samp.observations.shape == (1, 4))
samp = hmm2.sample((3,))
self.assertTrue(samp.observations.shape == (3, 4))
samp = hmm2.sample((500, 2, 8))
self.assertTrue(samp.observations.shape == (500, 2, 8, 4))
diffs = np.diff(np.array(samp.states))
self.assertTrue(np.all((diffs == 0) | (diffs == 1)))
self.assertTrue((samp.observations.int() == samp.states).all())
    def test_log_prob(self):
        """Check that ``log_prob`` defines a normalized distribution.

        Part 1: for binary observations of length 1..9, the log-probabilities
        of all 2**i possible runs must sum to 0 (i.e. probabilities sum to 1).
        Part 2: marginalizing the joint state/observation log-probability over
        all state sequences must recover ``hmm.log_prob``.  Part 3: compares
        the fixture HMMs (``self.shmm``/``self.hmm``, built in setUp) against
        hand-computed probabilities 3/16 and 1/16.
        """
        num_states = 3
        d = dist.Dirichlet(torch.ones(num_states)/num_states)
        m = d.sample((num_states,))
        lm = m.log()
        b = dist.Bernoulli(torch.rand(num_states))
        il = dist.Dirichlet(torch.ones(num_states)/num_states).sample()
        hmm = MyDiscreteHMM(il, lm, b)
        for i in range(1, 10):
            # All 2**i binary sequences of length i, via zero-padded binary
            # string formatting.
            all_data = torch.tensor([list(map(int, ("{{0:0{}b}}".format(i)).format(j))) for j in range(2 ** i)], dtype=torch.float)
            hlp = hmm.log_prob(all_data)
            ehlp = hlp.logsumexp(-1)
            # Total probability over all possible runs is 1.
            self.assertTrue(ehlp.allclose(torch.tensor(0.0, dtype=torch.float), atol=1e-6))
        st = 5
        tm = 4
        mm = 100  # NOTE(review): unused — presumably left over from an earlier version.
        dir = dist.Dirichlet(torch.full((st,), 1. / st))
        bern = dist.Bernoulli(torch.rand((st,)))
        il = dir.sample().log()
        tl = dir.sample((st,)).log()
        hmm = MyDiscreteHMM(il, tl, bern)
        # Enumerate all st**tm state sequences and all 2**tm observation runs
        # (to_base / num_to_data are helpers defined elsewhere in this file).
        all_states = torch.stack(
            [torch.tensor(to_base(x, st, tm)) for x in range(st ** tm)])
        all_runs = torch.stack(
            [torch.tensor(num_to_data(x, tm)) for x in range(2 ** tm)]).float()
        s, r = torch.broadcast_tensors(all_states.unsqueeze(-2),
                                       all_runs.unsqueeze(-3))
        x = joint_lp(s, r, hmm)
        # Summing the joint over states recovers the marginal likelihood.
        self.assertTrue(x.logsumexp(-2).allclose(hmm.log_prob(all_runs)))
        slp = self.shmm.log_prob(self.data[0].unsqueeze(-1))
        hlp = self.hmm.log_prob(self.data[0].unsqueeze(-1))
        self.assertTrue(slp.allclose(hlp))
        slp = self.shmm.log_prob(self.data)
        hlp = self.hmm.log_prob(self.data)
        # Hand-computed reference values for the setUp fixtures.
        self.assertTrue(slp.allclose(torch.tensor([3/16]).log()))
        self.assertTrue(hlp.allclose(torch.tensor([1/16]).log()))
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| 6,266 | 41.632653 | 131 | py |
perm_hmm | perm_hmm-master/tests/util_tests.py | import unittest
import torch
from perm_hmm import util
class MyTestCase(unittest.TestCase):
    """Tests for :func:`perm_hmm.util.first_nonzero`."""
    def test_first_nonzero(self):
        """Compare ``first_nonzero`` against a brute-force nested-loop oracle.

        For random boolean tensors of shape (6, 7, 5), the oracle scans along
        the requested dimension and records the first True index, defaulting
        to the dimension's size when no True is found.  Also checks the 1-D
        case for both multi-element and single-element tensors.
        """
        batch_shape = (5,)
        sample_shape = (100, 6, 7)
        # foos has shape (100, 6, 7, 5); each foo below is (6, 7, 5).
        foos = torch.distributions.Bernoulli(torch.rand(batch_shape)).sample(sample_shape).bool()
        for foo in foos:
            with self.subTest(foo=foo):
                # Oracle for dim=-1: default fill is the size of that dim.
                bar = torch.full(foo.shape[:-1], foo.shape[-1], dtype=int)
                for i in range(foo.shape[0]):
                    for j in range(foo.shape[1]):
                        for k in range(foo.shape[2]):
                            if foo[i, j, k]:
                                bar[i, j] = k
                                break
                baz = util.first_nonzero(foo)
                self.assertTrue((bar == baz).all())
                # Oracle for dim=-2.
                bar = torch.full(foo.shape[:1] + foo.shape[2:], foo.shape[-2], dtype=int)
                for i in range(foo.shape[0]):
                    for k in range(foo.shape[2]):
                        for j in range(foo.shape[1]):
                            if foo[i, j, k]:
                                bar[i, k] = j
                                break
                baz = util.first_nonzero(foo, dim=-2)
                self.assertTrue((bar == baz).all())
                # Oracle for dim=-3.
                bar = torch.full(foo.shape[:0] + foo.shape[1:], foo.shape[-3], dtype=int)
                for j in range(foo.shape[1]):
                    for k in range(foo.shape[2]):
                        for i in range(foo.shape[0]):
                            if foo[i, j, k]:
                                bar[j, k] = i
                                break
                baz = util.first_nonzero(foo, dim=-3)
                self.assertTrue((bar == baz).all())
        # 1-D tensor of length 5.
        foo = torch.distributions.Bernoulli(torch.rand(batch_shape)).sample(()).bool()
        bar = torch.full((), foo.shape[-1], dtype=int)
        for i in range(foo.shape[-1]):
            if foo[i]:
                bar = i
                break
        baz = util.first_nonzero(foo)
        self.assertTrue(bar == baz)
        # 1-D tensor of length 1 (edge case).
        foo = torch.distributions.Bernoulli(torch.rand((1,))).sample(()).bool()
        bar = torch.full((), foo.shape[-1], dtype=int)
        for i in range(foo.shape[-1]):
            if foo[i]:
                bar = i
                break
        baz = util.first_nonzero(foo)
        self.assertTrue(bar == baz)
        # Scalar (0-dim) case, left disabled in the original:
        # foo = torch.distributions.Bernoulli(torch.rand(())).sample(()).bool()
        # bar = torch.full((), foo.shape[-1], dtype=int)
        # if foo:
        #     bar = i
        # baz = util.first_nonzero(foo)
        # self.assertTrue(bar == baz)
| 2,652 | 41.111111 | 97 | py |
perm_hmm | perm_hmm-master/tests/binning_tests.py | import pytest
import torch
import pyro.distributions as dist
from perm_hmm.binning import bin_histogram, bin_log_histogram, binned_expanded_hmm, binned_hmm, optimally_binned_consecutive
from perm_hmm.models.hmms import DiscreteHMM, PermutedDiscreteHMM, ExpandedHMM
from example_systems.bin_beryllium import binned_hmm_class_value
from example_systems.beryllium import dimensionful_gamma, expanded_transitions, expanded_initial, expanded_outcomes
# Case 1: merge 4 outcomes into 2 bins; case 2: bins of width 1 are a no-op.
@pytest.mark.parametrize("base_hist,bin_edges,result", [
    (
        torch.tensor([
            [.1, .2, .3, .4],
            [.2, .5, 0., .3],
        ]),
        torch.tensor([
            0, 2, 4
        ]),
        torch.tensor([
            [.3, .7],
            [.7, .3],
        ]),
    ),
    (
        torch.tensor([
            [.1, .9],
            [.2, .8],
        ]),
        torch.tensor([
            0, 1, 2
        ]),
        torch.tensor([
            [.1, .9],
            [.2, .8],
        ]),
    ),
])
def test_bin_histogram(base_hist, bin_edges, result):
    """Probability-space binning sums histogram columns within each bin."""
    binned = bin_histogram(base_hist, bin_edges)
    assert binned.allclose(result)
# Same fixtures as test_bin_histogram, but exercised in log space.
@pytest.mark.parametrize("base_hist,bin_edges,result", [
    (
        torch.tensor([
            [.1, .2, .3, .4],
            [.2, .5, 0., .3],
        ]),
        torch.tensor([
            0, 2, 4
        ]),
        torch.tensor([
            [.3, .7],
            [.7, .3],
        ]),
    ),
    (
        torch.tensor([
            [.1, .9],
            [.2, .8],
        ]),
        torch.tensor([
            0, 1, 2
        ]),
        torch.tensor([
            [.1, .9],
            [.2, .8],
        ]),
    ),
])
def test_bin_log_histogram(base_hist, bin_edges, result):
    """Log-space binning must agree with probability-space binning."""
    base_log_hist = torch.log(base_hist)
    log_result = torch.log(result)
    binned = bin_log_histogram(base_log_hist, bin_edges)
    assert binned.allclose(log_result)
def test_binned_hmm():
    """Binning only the outputs must leave the HMM's state-space parameters
    (initial and transition logits) untouched."""
    num_states = 3
    num_outcomes = 8
    initial_logits = (torch.ones(num_states) / num_states).log()
    # Near-identity transitions (small epsilon keeps the log finite).
    transition_logits = (torch.eye(num_states) + 1e-14).log()
    transition_logits -= transition_logits.logsumexp(-1, keepdim=True)
    emission_probs = dist.Dirichlet(
        torch.ones(num_outcomes) / num_outcomes).sample((num_states,))
    hmm = DiscreteHMM(
        initial_logits,
        transition_logits,
        dist.Categorical(probs=emission_probs),
    )
    bin_edges = torch.tensor([0, 3, 6, num_outcomes])
    binned = binned_hmm(hmm, bin_edges)
    assert (binned.transition_logits == hmm.transition_logits).all()
    assert (binned.initial_logits == hmm.initial_logits).all()
def test_optimally_binned_consecutive():
    """The optimal 3-bin consecutive binning should beat log(1/2) cost and
    return exactly four bin edges (two outer + two interior)."""
    num_states = 3
    num_outcomes = 8
    initial_logits = (torch.ones(num_states) / num_states).log()
    # Near-identity transitions (small epsilon keeps the log finite).
    transition_logits = (torch.eye(num_states) + 1e-14).log()
    transition_logits -= transition_logits.logsumexp(-1, keepdim=True)
    emission_probs = dist.Dirichlet(
        torch.ones(num_outcomes) / num_outcomes).sample((num_states,))
    hmm = PermutedDiscreteHMM(
        initial_logits,
        transition_logits,
        dist.Categorical(probs=emission_probs),
    )
    min_edges, min_cost = optimally_binned_consecutive(hmm, 3)
    assert min_cost < torch.log(torch.tensor(.5))
    assert len(min_edges) == 4
def test_bin_beryllium():
    """Two routes to a binned beryllium HMM must agree.

    Route 1: ``binned_hmm_class_value`` (the convenience wrapper in
    example_systems.bin_beryllium).  Route 2: build the expanded HMM by hand
    and bin it with ``optimally_binned_consecutive`` + ``binned_expanded_hmm``.
    All parameters of the two resulting HMMs are compared in probability space.
    """
    time = 1e-5 * dimensionful_gamma
    max_photons = 10
    num_bins = 4
    steps = 2
    v1 = binned_hmm_class_value(time, max_photons, num_bins, steps)
    initial_logits = expanded_initial(max_photons)
    transition_logits = expanded_transitions(time, max_photons)
    observation_dist = dist.Categorical(logits=torch.from_numpy(expanded_outcomes(max_photons)))
    expanded_hmm = ExpandedHMM(
        torch.from_numpy(initial_logits),
        torch.from_numpy(transition_logits),
        observation_dist,
    )
    v2 = optimally_binned_consecutive(expanded_hmm, num_bins, steps=steps)
    hmm1 = v1[b"hmm"]
    # v2[0] is the optimal bin edges.
    hmm2 = binned_expanded_hmm(expanded_hmm, v2[0])
    # Compare in probability space (exp) to avoid -inf logit mismatches.
    assert torch.allclose(hmm1.initial_logits.exp(), hmm2.initial_logits.exp())
    assert torch.allclose(hmm1.transition_logits.exp(), hmm2.transition_logits.exp())
    assert torch.allclose(hmm1.observation_dist._param.exp(), hmm2.observation_dist._param.exp())
| 4,281 | 29.585714 | 124 | py |
perm_hmm | perm_hmm-master/example_systems/three_states.py | r"""
This module implements a simple three state model shown in the figure. The
circles on the left represent states, while the squares on the right are
outputs.
.. image:: _static/three_state_model.svg
"""
import numpy as np
import torch
import pyro.distributions as dist
from perm_hmm.util import ZERO, log1mexp
from perm_hmm.models.hmms import PermutedDiscreteHMM
def three_state_params(a, b, c=None, d=None):
    """Gives a list of parameters for the three state model.

    :param a: Probability that either edge state (0 or 2) emits the middle
        output symbol.
    :param b: Probability that either edge state transitions to the middle
        state.
    :param c: Self-transition probability of the middle state; defaults to
        ``b``.
    :param d: Probability that the middle state emits the middle output
        symbol; defaults to ``a``.
    :return: A tuple of (initial state probabilities (uniform),
        transition probabilities,
        output probabilities)
    """
    num_states = 3
    if c is None:
        c = b
    if d is None:
        d = a
    initial_probs = np.ones(num_states)/num_states
    # The no-op "- 0." terms present in the original corner entries have been
    # removed; the values are unchanged.
    output_probs = np.array([[1-a, a, 0.], [(1-d)/2, d, (1-d)/2], [0., a, 1-a]])
    transition_probs = np.array([[1-b, b, 0.], [(1-c)/2, c, (1-c)/2], [0., b, 1-b]])
    return initial_probs, transition_probs, output_probs
def three_state_log_params(log_a, log_b, log_c=None, log_d=None):
    """Log-domain parameters for the three state model.

    All inputs are log-probabilities.  ``log_c`` defaults to ``log_b`` and
    ``log_d`` to ``log_a``.

    :return: A tuple of (log initial state probabilities (uniform),
        log transition probabilities,
        log output probabilities)
    """
    num_states = 3
    log_c = log_b if log_c is None else log_c
    log_d = log_a if log_d is None else log_d
    # Uniform initial distribution in log space.
    initial_logits = np.full((num_states,), -np.log(num_states))
    # log(1 - p) for each input probability.
    not_a, not_b, not_c, not_d = (
        log1mexp(lp) for lp in (log_a, log_b, log_c, log_d))
    logzero = np.log(ZERO)
    log_two = np.log(2)
    output_logits = np.array([
        [not_a, log_a, logzero],
        [not_d - log_two, log_d, not_d - log_two],
        [logzero, log_a, not_a]
    ])
    transition_logits = np.array([
        [not_b, log_b, logzero],
        [not_c - log_two, log_c, not_c - log_two],
        [logzero, log_b, not_b]
    ])
    return initial_logits, transition_logits, output_logits
def three_state_hmm(log_a, log_b, log_c=None, log_d=None):
    r"""Gives a Permuted discrete HMM for the inputs.

    :return: A PermutedDiscreteHMM for the input parameters.
    """
    params = three_state_log_params(log_a, log_b, log_c, log_d)
    initial_logits, transition_logits, output_logits = map(torch.from_numpy, params)
    return PermutedDiscreteHMM(
        initial_logits,
        transition_logits,
        dist.Categorical(logits=output_logits),
    )
| 2,438 | 30.269231 | 136 | py |
perm_hmm | perm_hmm-master/example_systems/bin_beryllium.py | import os
import argparse
from itertools import combinations
import numpy as np
import matplotlib.pyplot as plt
import torch
from scipy.special import logsumexp
import pyro.distributions as dist
from pyro.distributions import Categorical
from perm_hmm.models.hmms import ExpandedHMM
from perm_hmm.simulator import HMMSimulator
from perm_hmm.util import kl_divergence
from adapt_hypo_test.two_states.util import log1mexp
from example_systems import beryllium as be
from example_systems.beryllium import DARK_STATE, BRIGHT_STATE, N_STATES, dimensionful_gamma, expanded_transitions, expanded_initial, expanded_outcomes
def save_hist_plot(fig, savedir):
    """Save *fig* to ``hist.svg`` inside *savedir* and return the path."""
    plt.sca(fig.gca())
    out_path = os.path.join(savedir, "hist.svg")
    plt.savefig(out_path)
    return out_path
def save_histograms(binned, savedir):
    """Write *binned* to ``histograms.npz`` (key ``binned``) in *savedir*.

    :return: the path of the written file.
    """
    out_path = os.path.join(savedir, "histograms.npz")
    np.savez(out_path, binned=binned)
    return out_path
def plot_binned(binned, bin_edges, labels=None, fig=None):
    """Bar-plot binned log-histograms, one semi-transparent series per label.

    :param binned: log-histograms, one row per label.
    :param bin_edges: bin edges; bar widths come from consecutive differences.
    :param labels: legend labels, default ``["Dark", "Bright"]``.
    :param fig: existing figure to draw on; a new one is created if None.
    :return: the figure.
    """
    if fig is None:
        fig = plt.figure()
    if labels is None:
        labels = ["Dark", "Bright"]
    axes = fig.gca()
    for log_hist, label in zip(binned, labels):
        axes.bar(bin_edges[:-1], np.exp(log_hist), np.diff(bin_edges),
                 align="edge", alpha=.5, label=label)
    plt.xlabel("Number of photons")
    plt.ylabel("Probability of detection")
    return fig
def plot_unbinned(hists, labels=None, fig=None):
    """Bar-plot unbinned log-histograms (unit-width bars per photon count).

    :return: the figure.
    """
    if fig is None:
        fig = plt.figure()
    if labels is None:
        labels = ["Dark", "Bright"]
    axes = fig.gca()
    photon_counts = np.arange(hists.shape[-1])
    for log_hist, label in zip(hists, labels):
        axes.bar(photon_counts, np.exp(log_hist), align="edge", alpha=.5,
                 label=label)
    return fig
def bin_histogram(base_hist, bin_edges):
    """Aggregate a log-histogram into coarser bins along the last axis.

    Each output column is the logsumexp of the input columns between
    consecutive bin edges.
    """
    edge_pairs = zip(bin_edges[:-1], bin_edges[1:])
    binned_cols = [logsumexp(base_hist[..., lo:hi], axis=-1)
                   for lo, hi in edge_pairs]
    return np.stack(binned_cols, axis=-1)
def find_optimal_binning(base_hists, value_func, num_bins, return_opt=False):
    """Exhaustively search consecutive binnings maximizing *value_func*.

    Interior edges are drawn from 1..max_phot-2; the outer edges are pinned
    at 0 and max_phot.  Ties keep the first candidate encountered.
    NOTE(review): the interior range stops at max_phot-2, so an edge at
    max_phot-1 is never tried — confirm this is intended.
    """
    max_phot = base_hists.shape[-1]
    max_value = None
    best_bin_edges = None
    best_binned = None
    for interior in combinations(np.arange(max_phot-2)+1, num_bins-1):
        edges = np.concatenate((np.array([0]), interior, np.array([max_phot])))
        candidate = bin_histogram(base_hists, edges)
        score = value_func(edges)
        if max_value is None or score > max_value:
            max_value, best_bin_edges, best_binned = score, edges, candidate
    retval = {
        b"binned_bright_dark": best_binned,
        b"bin_edges": best_bin_edges,
    }
    if return_opt:
        retval[b"max_value"] = max_value
    return retval
def unbinned_hists(integration_time, max_photons=10):
    """Photon-count log-histograms for the dark and bright states.

    Truncates the count distribution at ``max_photons`` and folds all the
    remaining probability mass into the last count, so each state's
    histogram stays normalized.  Returns the rows in (dark, bright) order
    — presumably shape ``(2, max_photons)`` after the transpose; confirm
    against ``be.log_prob_n_given_l``'s output layout.
    """
    hists = be.log_prob_n_given_l(np.arange(max_photons), integration_time)
    # Mass already accounted for by counts 0..max_photons-1, per state.
    total_weight = logsumexp(hists, -2)
    # Add the leftover tail mass log(1 - total) onto the last count.
    hists[-1] = logsumexp(np.stack([hists[-1, :], log1mexp(total_weight)], axis=-2), axis=-2)
    # Keep only the dark and bright columns, then put states on the rows.
    hists = hists[..., [DARK_STATE, BRIGHT_STATE]]
    hists = hists.transpose()
    return hists
def bin_bright_dark(hists, num_bins, value_func=None, return_opt=False):
    """Find the optimal binning of the (dark, bright) histograms.

    When no objective is supplied, maximizes the symmetrized KL divergence
    between the two binned distributions.
    """
    if value_func is None:
        def value_func(bin_edges):
            # Symmetrized KL between the binned dark and bright histograms.
            bright_dark_hists = bin_histogram(hists, bin_edges)
            return kl_divergence(bright_dark_hists[0, :], bright_dark_hists[1, :]) + kl_divergence(bright_dark_hists[1, :], bright_dark_hists[0, :])
    return find_optimal_binning(hists, value_func, num_bins, return_opt=return_opt)
def bin_all(hists, num_bins, value_func=None, return_opt=False):
    """Bin *all* histograms using the bright/dark-optimal bin edges.

    Same as :func:`bin_bright_dark`, but the returned dict carries the full
    binned histograms under ``b"binned_hists"`` instead of just the
    bright/dark pair.
    """
    result = bin_bright_dark(hists, num_bins, value_func=value_func,
                             return_opt=return_opt)
    del result[b"binned_bright_dark"]
    result[b"binned_hists"] = bin_histogram(hists, result[b"bin_edges"])
    return result
def bin_hmm_from_bin_edges(time, bin_edges, max_photons):
    """Build an :class:`ExpandedHMM` whose outcome dimension is binned.

    The expanded transition matrix (states x outcomes on both axes) is
    reshaped so the outcome axis can be binned with ``bin_histogram``, then
    flattened back to a (N_STATES*num_bins)^2 matrix.
    """
    num_bins = len(bin_edges) - 1
    ltm = expanded_transitions(time, k=max_photons)
    ltm = ltm.reshape((N_STATES, max_photons, N_STATES, max_photons))
    binned_transitions = bin_histogram(ltm, bin_edges)[:, np.arange(num_bins), ...].reshape((N_STATES*num_bins, N_STATES*num_bins)) # Doesn't matter which "previous outcome" slice we take, as long as it has the right size.
    binned_initial = expanded_initial(k=num_bins)
    binned_outcomes = expanded_outcomes(k=num_bins)
    observation_dist = Categorical(logits=torch.from_numpy(binned_outcomes))
    hmm = ExpandedHMM(torch.from_numpy(binned_initial), torch.from_numpy(binned_transitions), observation_dist)
    return hmm
def log_class_prob(model, steps):
    """Log-probability of correct classification for *model* after *steps*
    steps, i.e. log(1 - misclassification rate)."""
    simulator = HMMSimulator(model)
    classifications = simulator.all_classifications(steps)
    return log1mexp(classifications.log_misclassification_rate())
def make_value_func(time, steps, max_photons):
    """Build an objective mapping bin edges to the log classification
    probability of the corresponding binned HMM."""
    def value_func(bin_edges):
        binned = bin_hmm_from_bin_edges(time, bin_edges, max_photons)
        return log_class_prob(binned, steps)
    return value_func
def get_optimal_bin_edges(time, max_photons=10, num_bins=4, value_func=None, verbosity=0):
    """Optimal binning of the unbinned photon histograms at *time*.

    Nonzero *verbosity* also includes the optimum value in the result.
    """
    hists = unbinned_hists(time, max_photons=max_photons)
    return bin_all(hists, num_bins=num_bins, value_func=value_func,
                   return_opt=bool(verbosity))
def binned_hmm_class_value(time, max_photons=10, num_bins=4, steps=3, verbosity=0):
    """Binned HMM built from the edges that maximize the classification
    probability over *steps* steps.

    :return: dict with the binned HMM (``b"hmm"``) and the binning search
        result (``b"optimal_binning"``).
    """
    objective = make_value_func(time, steps, max_photons)
    optimal_binning = get_optimal_bin_edges(
        time, max_photons=max_photons, num_bins=num_bins,
        value_func=objective, verbosity=verbosity)
    hmm = bin_hmm_from_bin_edges(
        time, optimal_binning[b"bin_edges"], max_photons)
    return {
        b"hmm": hmm,
        b"optimal_binning": optimal_binning,
    }
def bin_hmm(time, max_photons=10, num_bins=4, value_func=None, verbosity=0):
    r"""Given an integration time, bins the output distributions such that the
    symmetrized divergence from the bright state to the dark state is maximal,
    then returns everything used to compute that, along with the resulting HMM.

    :param time: dimensionless integration time.
    :param max_photons: truncation point of the photon-count distribution.
    :param num_bins: number of outcome bins to produce.
    :param value_func: optional custom objective for the binning search;
        defaults to the symmetrized KL divergence (see ``bin_bright_dark``).
    :param verbosity: nonzero also records the optimum value of the search.
    :return: dict with the binned ``ExpandedHMM`` (``b"hmm"``), the unbinned
        histograms (``b"base_hists"``), and the search result
        (``b"optimal_binning"``).
    """
    base_hists = unbinned_hists(time, max_photons=max_photons)
    opt_bin = bin_all(base_hists, num_bins, value_func=value_func, return_opt=bool(verbosity))
    bin_edges = opt_bin[b"bin_edges"]
    # Reshape the expanded transitions so the outcome axis can be binned,
    # then flatten back (same construction as bin_hmm_from_bin_edges).
    ltm = expanded_transitions(time, k=max_photons)
    ltm = ltm.reshape((N_STATES, max_photons, N_STATES, max_photons))
    binned_transitions = bin_histogram(ltm, bin_edges)[:, np.arange(num_bins), ...].reshape((N_STATES*num_bins, N_STATES*num_bins)) # Doesn't matter which "previous outcome" slice we take, as long as it has the right size.
    binned_initial = expanded_initial(k=num_bins)
    # binned_initial = bin_histogram(base_initial, bin_edges)
    binned_outcomes = expanded_outcomes(k=num_bins)
    observation_dist = dist.Categorical(logits=torch.from_numpy(binned_outcomes))
    hmm = ExpandedHMM(torch.from_numpy(binned_initial), torch.from_numpy(binned_transitions), observation_dist)
    retval = {
        b"hmm": hmm,
        b"base_hists": base_hists,
        b"optimal_binning": opt_bin,
    }
    return retval
def main(dimensionful_integration_time, num_bins, max_phot=10, savedir=None, save=False):
    """CLI entry point: bin the bright/dark histograms, plot, and report.

    Converts the dimensionful time to the dimensionless units used by the
    beryllium model, finds the divergence-optimal binning, overlays binned
    and unbinned histograms, optionally saves plot/data to *savedir*
    (default: current directory), and prints the binned vs unbinned
    symmetrized divergences.

    :raises ValueError: if num_bins exceeds max_phot.
    """
    if num_bins > max_phot:
        raise ValueError("Too many bins")
    integration_time = dimensionful_integration_time * dimensionful_gamma
    hists = unbinned_hists(integration_time, max_photons=max_phot)
    bin_dict = bin_bright_dark(hists, num_bins, return_opt=True)
    binned = bin_dict[b"binned_bright_dark"]
    bin_edges = bin_dict[b"bin_edges"]
    opt_divergence = bin_dict[b"max_value"]
    # Overlay the binned bars on top of the unbinned histogram.
    fig = plot_binned(binned, bin_edges)
    fig = plot_unbinned(hists, fig=fig)
    if save:
        if savedir is None:
            savedir = os.getcwd()
        _ = save_hist_plot(fig, savedir)
        _ = save_histograms(binned, savedir)
    plt.show()
    unbinned_divergence = kl_divergence(hists[0, :], hists[1, :]) + kl_divergence(hists[1, :], hists[0, :])
    print("Time: {}, Bins: {}, Photons: {}, Binned divergence: {}, Unbinned divergence: {}".format(dimensionful_integration_time, num_bins, max_phot, opt_divergence, unbinned_divergence))
# CLI: integration time is required; bin/photon counts are optional.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "integration_time",
        metavar="integration-time",
        help="The amount of time to integrate the collection of photons for,"
             "in units of 1/(2 * pi * 19.4 * 10 ** 6 Hz)",
        type=float,
    )
    # nargs="?" makes these positionals optional so their declared defaults
    # actually apply — argparse silently ignores `default=` on a required
    # positional, which is what the original code did.
    parser.add_argument(
        "num_bins",
        metavar="num-bins",
        type=int,
        nargs="?",
        default=5,
    )
    parser.add_argument(
        "max_phot",
        metavar="max-phot",
        type=int,
        nargs="?",
        default=5,
    )
    args = parser.parse_args()
    main(args.integration_time, args.num_bins, max_phot=args.max_phot)
| 8,753 | 37.906667 | 223 | py |
AdaGCN_TKDE | AdaGCN_TKDE-main/layers.py | from inits import *
import tensorflow as tf
flags = tf.app.flags
FLAGS = flags.FLAGS
# global unique layer ID dictionary for layer name assignment
_LAYER_UIDS = {}


def get_layer_uid(layer_name=''):
    """Return the next unique, 1-based ID for the given layer name."""
    next_uid = _LAYER_UIDS.get(layer_name, 0) + 1
    _LAYER_UIDS[layer_name] = next_uid
    return next_uid
def sparse_dropout(x, keep_prob, noise_shape):
    """Dropout for sparse tensors: retain each nonzero entry with
    probability keep_prob and rescale the survivors by 1/keep_prob."""
    # floor(keep_prob + U[0,1)) is 1 with probability keep_prob, else 0.
    keep_mask = tf.floor(keep_prob + tf.random_uniform(noise_shape))
    retained = tf.sparse_retain(x, tf.cast(keep_mask, dtype=tf.bool))
    return retained * (1./keep_prob)
def dot(x, y, sparse=False):
    """Wrapper for tf.matmul that dispatches on whether x is sparse."""
    if sparse:
        return tf.sparse_tensor_dense_matmul(x, y)
    return tf.matmul(x, y)
class Layer(object):
    """Base layer class. Defines basic API for all layer objects.
    Implementation inspired by keras (http://keras.io).
    # Properties
        name: String, defines the variable scope of the layer.
        logging: Boolean, switches Tensorflow histogram logging on/off
    # Methods
        _call(inputs): Defines computation graph of layer
            (i.e. takes input, returns output)
        __call__(inputs): Wrapper for _call()
        _log_vars(): Log all variables
    """
    def __init__(self, **kwargs):
        # Only 'name' and 'logging' are accepted as keyword arguments.
        allowed_kwargs = {'name', 'logging'}
        for kwarg in kwargs.keys():
            assert kwarg in allowed_kwargs, 'Invalid keyword argument: ' + kwarg
        name = kwargs.get('name')
        if not name:
            # Auto-name as "<classname>_<uid>" using the global UID counter.
            layer = self.__class__.__name__.lower()
            name = layer + '_' + str(get_layer_uid(layer))
        self.name = name
        logging = kwargs.get('logging', False)
        self.logging = logging
        self.sparse_inputs = False
    def _call(self, inputs):
        # Identity by default; subclasses override with the layer's graph.
        return inputs
    def __call__(self, inputs):
        # Wrap the layer's computation in its own name scope and optionally
        # log input/output histograms (sparse inputs can't be histogrammed).
        with tf.name_scope(self.name):
            if self.logging and not self.sparse_inputs:
                tf.summary.histogram(self.name + '/inputs', inputs)
            outputs = self._call(inputs)
            if self.logging:
                tf.summary.histogram(self.name + '/outputs', outputs)
            return outputs
class Dense(Layer):
    """Dense layer: dropout -> (sparse or dense) matmul -> optional bias
    -> activation.

    Weights and bias variables are created by the caller and passed in.
    """
    def __init__(self,
                 placeholder_dropout,
                 placeholder_num_features_nonzero,
                 weights,
                 bias,
                 dropout=True,
                 sparse_inputs=False,
                 act=tf.nn.relu,
                 flag_bias=False,
                 **kwargs):
        super(Dense, self).__init__(**kwargs)
        # Dropout rate comes from a placeholder so it can be switched off at
        # evaluation time; when dropout is disabled, it's a constant 0.
        if dropout:
            self.dropout = placeholder_dropout
        else:
            self.dropout = 0.
        self.act = act
        self.sparse_inputs = sparse_inputs
        self.weights = weights
        self.bias = bias
        self.flag_bias = flag_bias
        # helper variable for sparse dropout
        self.num_features_nonzero = placeholder_num_features_nonzero,  # NOTE(review): trailing comma wraps this in a 1-tuple, unlike GraphConvolution below — confirm intended.
    def _call(self, inputs):
        x = inputs
        # dropout (sparse tensors need the custom sparse_dropout)
        if self.sparse_inputs:
            x = sparse_dropout(x, 1-self.dropout, self.num_features_nonzero)
        else:
            x = tf.nn.dropout(x, 1-self.dropout)
        # transform: x @ W
        output = dot(x, self.weights, sparse=self.sparse_inputs)
        # bias
        if self.flag_bias:
            output += self.bias
        return self.act(output)
class GraphConvolution(Layer):
    """Graph convolution layer.

    Supports two propagation modes selected by ``FLAGS.gnn``: plain GCN
    (single sparse multiply by each support matrix) or IGCN-style repeated
    smoothing (``FLAGS.smoothing_steps`` multiplies).  ``input_dim`` and
    ``output_dim`` are accepted but not read here — presumably kept for
    interface compatibility with the weight-creating caller; confirm.
    """
    def __init__(self,
                 input_dim,
                 output_dim,
                 placeholder_dropout,
                 placeholder_support,
                 placeholder_num_features_nonzero,
                 weights,
                 bias,
                 dropout=True,
                 sparse_inputs=False,
                 act=tf.nn.relu,
                 flag_bias=False,
                 featureless=False,
                 **kwargs):
        super(GraphConvolution, self).__init__(**kwargs)
        # Dropout rate placeholder, or constant 0 when dropout is disabled.
        if dropout:
            self.dropout = placeholder_dropout
        else:
            self.dropout = 0.
        self.act = act
        self.support = placeholder_support
        self.sparse_inputs = sparse_inputs
        self.featureless = featureless
        self.flag_bias = flag_bias
        self.weights = weights
        self.bias = bias
        # helper variable for sparse dropout
        self.num_features_nonzero = placeholder_num_features_nonzero
    def conv(self, adj, features):
        '''
        IGCN renormalization filtering: apply the (sparse) adjacency
        FLAGS.smoothing_steps times, i.e. features <- adj^k @ features.
        '''
        def tf_rnm(adj, features, k):
            new_feature = features
            for _ in range(k):
                new_feature = tf.sparse_tensor_dense_matmul(adj, new_feature)
            # dense_adj = tf.sparse_tensor_to_dense(adj, validate_indices=False)
            # new_feature = tf.matmul(dense_adj, new_feature, a_is_sparse=True)
            return new_feature
        result = tf_rnm(adj, features, FLAGS.smoothing_steps)
        return result
    def _call(self, inputs):
        x = inputs
        # dropout
        if self.sparse_inputs:
            x = sparse_dropout(x, 1-self.dropout, self.num_features_nonzero)
        else:
            x = tf.nn.dropout(x, 1-self.dropout)
        # convolve: one term per support matrix, summed at the end
        supports = list()
        for i in range(len(self.support)):
            if not self.featureless:
                # pre_sup = X @ W
                pre_sup = dot(x, self.weights, sparse=self.sparse_inputs)
            else:
                # featureless: weights stand in for X @ W directly
                pre_sup = self.weights
            if FLAGS.gnn=='gcn':
                support = dot(self.support[i], pre_sup, sparse=True)
            elif FLAGS.gnn=='igcn':
                support = self.conv(self.support[i], pre_sup)
            supports.append(support)
        output = tf.add_n(supports)
        # bias
        if self.flag_bias:
            output += self.bias
        return self.act(output)
| 6,120 | 28.427885 | 84 | py |
GOAD | GOAD-master/opt_tc.py | import torch.utils.data
import numpy as np
import torch
import torch.utils.data
from torch.backends import cudnn
from wideresnet import WideResNet
from sklearn.metrics import roc_auc_score
cudnn.benchmark = True
def tc_loss(zs, m):
    """Triplet-center loss over transformation embeddings.

    :param zs: embeddings of shape (batch, n_trans, d), one row per
        transformation.
    :param m: margin by which each embedding's own center must beat the
        nearest *other* center.
    :return: scalar loss tensor on the same device as ``zs``.
    """
    means = zs.mean(0).unsqueeze(0)
    # Squared distance of every embedding to every transformation center.
    res = ((zs.unsqueeze(2) - means.unsqueeze(1)) ** 2).sum(-1)
    pos = torch.diagonal(res, dim1=1, dim2=2)
    # Mask the diagonal with a large constant so min() picks the nearest
    # OTHER center.  Allocated on zs's device (instead of the original
    # hard-coded .cuda()) so the loss also runs on CPU tensors; CUDA inputs
    # still get a CUDA offset, so this is backward compatible.
    offset = torch.diagflat(torch.ones(zs.size(1), device=zs.device)).unsqueeze(0) * 1e6
    neg = (res + offset).min(-1)[0]
    loss = torch.clamp(pos + m - neg, min=0).mean()
    return loss
class TransClassifier():
    """Trains a WideResNet to classify which transformation was applied to
    each image, and scores test samples by their (negative) log-probability
    of matching the per-transformation embedding centers."""
    def __init__(self, num_trans, args):
        # num_trans: number of transformations (also the classifier's
        # number of output classes).  args: hyperparameter namespace
        # (depth, widen_factor, batch_size, m, epochs, lmbda, reg, eps).
        self.n_trans = num_trans
        self.args = args
        self.netWRN = WideResNet(self.args.depth, num_trans, self.args.widen_factor).cuda()
        self.optimizer = torch.optim.Adam(self.netWRN.parameters())
    def fit_trans_classifier(self, x_train, x_test, y_test):
        """Train on transformed images and print the test AUC each epoch.

        Assumes x_train/x_test are laid out as consecutive groups of
        ``n_trans`` rows (all transformations of one image together), so
        len(x_train) must be divisible by n_trans — TODO confirm with the
        data pipeline.
        """
        print("Training")
        self.netWRN.train()
        bs = self.args.batch_size
        N, sh, sw, nc = x_train.shape
        n_rots = self.n_trans
        m = self.args.m
        celoss = torch.nn.CrossEntropyLoss()
        ndf = 256  # embedding width of the transformation-center head
        for epoch in range(self.args.epochs):
            # Shuffle whole image-groups, then expand each group start back
            # into its n_rots consecutive row indices.
            rp = np.random.permutation(N//n_rots)
            rp = np.concatenate([np.arange(n_rots) + rp[i]*n_rots for i in range(len(rp))])
            assert len(rp) == N
            all_zs = torch.zeros((len(x_train), ndf)).cuda()
            diffs_all = []
            for i in range(0, len(x_train), bs):
                batch_range = min(bs, len(x_train) - i)
                idx = np.arange(batch_range) + i
                xs = torch.from_numpy(x_train[rp[idx]]).float().cuda()
                # zs_tc: center-loss embeddings; zs_ce: classification logits.
                zs_tc, zs_ce = self.netWRN(xs)
                all_zs[idx] = zs_tc
                # Labels cycle 0..n_rots-1 within each image group.
                train_labels = torch.from_numpy(np.tile(np.arange(n_rots), batch_range//n_rots)).long().cuda()
                zs = torch.reshape(zs_tc, (batch_range//n_rots, n_rots, ndf))
                means = zs.mean(0).unsqueeze(0)
                # Negative squared distances to the batch centers (kept in
                # diffs_all; NOTE(review): diffs_all is never read afterwards).
                diffs = -((zs.unsqueeze(2).detach().cpu().numpy() - means.unsqueeze(1).detach().cpu().numpy()) ** 2).sum(-1)
                diffs_all.append(torch.diagonal(torch.tensor(diffs), dim1=1, dim2=2))
                tc = tc_loss(zs, m)
                ce = celoss(zs_ce, train_labels)
                # Optional L2 regularization of the embeddings.
                if self.args.reg:
                    loss = ce + self.args.lmbda * tc + 10 *(zs*zs).mean()
                else:
                    loss = ce + self.args.lmbda * tc
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
            self.netWRN.eval()
            # Per-transformation centers over the whole training set.
            all_zs = torch.reshape(all_zs, (N//n_rots, n_rots, ndf))
            means = all_zs.mean(0, keepdim=True)
            with torch.no_grad():
                batch_size = bs
                # One score per test image per transformation; assumes
                # len(y_test) == len(x_test) // n_rots — TODO confirm.
                val_probs_rots = np.zeros((len(y_test), self.n_trans))
                for i in range(0, len(x_test), batch_size):
                    batch_range = min(batch_size, len(x_test) - i)
                    idx = np.arange(batch_range) + i
                    xs = torch.from_numpy(x_test[idx]).float().cuda()
                    zs, fs = self.netWRN(xs)
                    zs = torch.reshape(zs, (batch_range // n_rots, n_rots, ndf))
                    diffs = ((zs.unsqueeze(2) - means) ** 2).sum(-1)
                    # Clamp distances from below to stabilize the softmax.
                    diffs_eps = self.args.eps * torch.ones_like(diffs)
                    diffs = torch.max(diffs, diffs_eps)
                    logp_sz = torch.nn.functional.log_softmax(-diffs, dim=2)
                    zs_reidx = np.arange(batch_range // n_rots) + i // n_rots
                    # Anomaly score: -log p of each transform matching its own center.
                    val_probs_rots[zs_reidx] = -torch.diagonal(logp_sz, 0, 1, 2).cpu().data.numpy()
                val_probs_rots = val_probs_rots.sum(1)
                # Lower total score = more normal, hence the negation for AUC.
                print("Epoch:", epoch, ", AUC: ", roc_auc_score(y_test, -val_probs_rots))
| 3,871 | 38.510204 | 124 | py |
GOAD | GOAD-master/data_loader.py | import scipy.io
import numpy as np
import pandas as pd
import torchvision.datasets as dset
import os
class Data_Loader:
    """Loads and normalizes the anomaly-detection benchmark datasets
    (CIFAR-10, KDDCUP99, KDDCUP99-Rev, Thyroid, Arrhythmia).

    Each ``*_train_valid_data`` method returns a normalized triple
    ``(x_train, val_real, val_fake)`` where x_train and val_real are normal
    samples and val_fake are anomalies.
    """
    def __init__(self, n_trains=None):
        # n_trains: optional cap on training samples (stored, not used here).
        self.n_train = n_trains
        # [0]: KDD99 10% data file, [1]: column names/types file.
        self.urls = [
            "http://kdd.ics.uci.edu/databases/kddcup99/kddcup.data_10_percent.gz",
            "http://kdd.ics.uci.edu/databases/kddcup99/kddcup.names"
        ]
    def norm_kdd_data(self, train_real, val_real, val_fake, cont_indices):
        """Standardize only the continuous KDD columns (mean/std from the
        training set); symbolic (one-hot) columns are passed through and
        moved to the front."""
        symb_indices = np.delete(np.arange(train_real.shape[1]), cont_indices)
        mus = train_real[:, cont_indices].mean(0)
        sds = train_real[:, cont_indices].std(0)
        sds[sds == 0] = 1  # avoid division by zero for constant columns
        def get_norm(xs, mu, sd):
            bin_cols = xs[:, symb_indices]
            cont_cols = xs[:, cont_indices]
            cont_cols = np.array([(x - mu) / sd for x in cont_cols])
            return np.concatenate([bin_cols, cont_cols], 1)
        train_real = get_norm(train_real, mus, sds)
        val_real = get_norm(val_real, mus, sds)
        val_fake = get_norm(val_fake, mus, sds)
        return train_real, val_real, val_fake
    def norm_data(self, train_real, val_real, val_fake):
        """Standardize all columns using training-set mean/std."""
        mus = train_real.mean(0)
        sds = train_real.std(0)
        sds[sds == 0] = 1  # avoid division by zero for constant columns
        def get_norm(xs, mu, sd):
            return np.array([(x - mu) / sd for x in xs])
        train_real = get_norm(train_real, mus, sds)
        val_real = get_norm(val_real, mus, sds)
        val_fake = get_norm(val_fake, mus, sds)
        return train_real, val_real, val_fake
    def norm(self, data, mu=1):
        """Map uint8 pixel values [0, 255] to [-mu, 2-mu] (default [-1, 1])."""
        return 2 * (data / 255.) - mu
    def get_dataset(self, dataset_name, c_percent=None, true_label=1):
        """Dispatch to the loader for *dataset_name*; returns None for an
        unknown name."""
        if dataset_name == 'cifar10':
            return self.load_data_CIFAR10(true_label)
        if dataset_name == 'kdd':
            return self.KDD99_train_valid_data()
        if dataset_name == 'kddrev':
            return self.KDD99Rev_train_valid_data()
        if dataset_name == 'thyroid':
            return self.Thyroid_train_valid_data()
        if dataset_name == 'arrhythmia':
            return self.Arrhythmia_train_valid_data()
        if dataset_name == 'ckdd':
            return self.contaminatedKDD99_train_valid_data(c_percent)
    def load_data_CIFAR10(self, true_label):
        """One-class CIFAR-10: train on class *true_label* only, test on all
        classes.  Returns (x_train, x_test, test_labels) in [-1, 1]."""
        root = './data'
        if not os.path.exists(root):
            os.mkdir(root)
        trainset = dset.CIFAR10(root, train=True, download=True)
        train_data = np.array(trainset.data)
        train_labels = np.array(trainset.targets)
        testset = dset.CIFAR10(root, train=False, download=True)
        test_data = np.array(testset.data)
        test_labels = np.array(testset.targets)
        # Keep only the chosen "normal" class for training.
        train_data = train_data[np.where(train_labels == true_label)]
        x_train = self.norm(np.asarray(train_data, dtype='float32'))
        x_test = self.norm(np.asarray(test_data, dtype='float32'))
        return x_train, x_test, test_labels
    def Thyroid_train_valid_data(self):
        """Thyroid: half of the normal samples for training, the rest plus
        all anomalies for validation."""
        data = scipy.io.loadmat("data/thyroid.mat")
        samples = data['X'] # 3772
        labels = ((data['y']).astype(np.int32)).reshape(-1)
        norm_samples = samples[labels == 0] # 3679 norm
        anom_samples = samples[labels == 1] # 93 anom
        n_train = len(norm_samples) // 2
        x_train = norm_samples[:n_train] # 1839 train
        val_real = norm_samples[n_train:]
        val_fake = anom_samples
        return self.norm_data(x_train, val_real, val_fake)
    def Arrhythmia_train_valid_data(self):
        """Arrhythmia: half of the normal samples for training, the rest
        plus all anomalies for validation."""
        data = scipy.io.loadmat("data/arrhythmia.mat")
        samples = data['X'] # 518
        labels = ((data['y']).astype(np.int32)).reshape(-1)
        norm_samples = samples[labels == 0] # 452 norm
        anom_samples = samples[labels == 1] # 66 anom
        n_train = len(norm_samples) // 2
        x_train = norm_samples[:n_train] # 226 train
        val_real = norm_samples[n_train:]
        val_fake = anom_samples
        return self.norm_data(x_train, val_real, val_fake)
    def KDD99_preprocessing(self):
        """Download and one-hot-encode KDD99; returns (samples, labels,
        cont_indices) where labels==1 marks 'normal.' traffic and
        cont_indices are the continuous-column positions."""
        df_colnames = pd.read_csv(self.urls[1], skiprows=1, sep=':', names=['f_names', 'f_types'])
        df_colnames.loc[df_colnames.shape[0]] = ['status', ' symbolic.']
        df = pd.read_csv(self.urls[0], header=None, names=df_colnames['f_names'].values)
        df_symbolic = df_colnames[df_colnames['f_types'].str.contains('symbolic.')]
        df_continuous = df_colnames[df_colnames['f_types'].str.contains('continuous.')]
        # One-hot encode symbolic features (excluding the status column).
        samples = pd.get_dummies(df.iloc[:, :-1], columns=df_symbolic['f_names'][:-1])
        smp_keys = samples.keys()
        cont_indices = []
        for cont in df_continuous['f_names']:
            cont_indices.append(smp_keys.get_loc(cont))
        labels = np.where(df['status'] == 'normal.', 1, 0)
        return np.array(samples), np.array(labels), cont_indices
    def KDD99_train_valid_data(self):
        """KDD99: attack traffic (labels==0, the majority) is treated as
        normal; 'normal.' traffic (labels==1, the minority) is the anomaly."""
        samples, labels, cont_indices = self.KDD99_preprocessing()
        anom_samples = samples[labels == 1] # norm: 97278
        norm_samples = samples[labels == 0] # attack: 396743
        n_norm = norm_samples.shape[0]
        ranidx = np.random.permutation(n_norm)
        n_train = n_norm // 2
        x_train = norm_samples[ranidx[:n_train]]
        norm_test = norm_samples[ranidx[n_train:]]
        val_real = norm_test
        val_fake = anom_samples
        return self.norm_kdd_data(x_train, val_real, val_fake, cont_indices)
    def KDD99Rev_train_valid_data(self):
        """Reversed KDD99: 'normal.' traffic is normal; attacks are
        subsampled to a 4:1 normal:attack ratio and used as anomalies."""
        samples, labels, cont_indices = self.KDD99_preprocessing()
        norm_samples = samples[labels == 1] # norm: 97278
        # Randomly draw samples labeled as 'attack'
        # so that the ratio btw norm:attack will be 4:1
        # len(anom) = 24,319
        anom_samples = samples[labels == 0] # attack: 396743
        rp = np.random.permutation(len(anom_samples))
        rp_cut = rp[:24319]
        anom_samples = anom_samples[rp_cut] # attack:24319
        n_norm = norm_samples.shape[0]
        ranidx = np.random.permutation(n_norm)
        n_train = n_norm // 2
        x_train = norm_samples[ranidx[:n_train]]
        norm_test = norm_samples[ranidx[n_train:]]
        val_real = norm_test
        val_fake = anom_samples
        return self.norm_kdd_data(x_train, val_real, val_fake, cont_indices)
    def contaminatedKDD99_train_valid_data(self, c_percent):
        """KDD99 with c_percent% of the training anomalies mixed into the
        (otherwise normal) training set."""
        samples, labels, cont_indices = self.KDD99_preprocessing()
        ranidx = np.random.permutation(len(samples))
        n_test = len(samples)//2
        x_test = samples[ranidx[:n_test]]
        y_test = labels[ranidx[:n_test]]
        x_train = samples[ranidx[n_test:]]
        y_train = labels[ranidx[n_test:]]
        norm_samples = x_train[y_train == 0] # attack: 396743
        anom_samples = x_train[y_train == 1] # norm: 97278
        n_contaminated = int((c_percent/100)*len(anom_samples))
        # NOTE(review): this permutes indices 0..n_contaminated-1, so it
        # always takes the FIRST n_contaminated anomaly rows (in shuffled
        # order) rather than a random subset of all anomalies — confirm
        # intended; anom_samples order is already random from ranidx above.
        rpc = np.random.permutation(n_contaminated)
        x_train = np.concatenate([norm_samples, anom_samples[rpc]])
        val_real = x_test[y_test == 0]
        val_fake = x_test[y_test == 1]
        return self.norm_kdd_data(x_train, val_real, val_fake, cont_indices)
| 7,157 | 34.79 | 98 | py |
GOAD | GOAD-master/transformations.py | import abc
import itertools
import numpy as np
from keras.preprocessing.image import apply_affine_transform
# The code is adapted from https://github.com/izikgo/AnomalyDetectionTransformations/blob/master/transformations.py
def get_transformer(type_trans):
    """Factory for transformation sets.

    :param type_trans: 'complicated' (flips x translations x rotations)
        or 'simple' (flips x rotations).
    :return: the corresponding transformer instance.
    :raises ValueError: for an unknown ``type_trans``.  The original code
        silently fell through and returned None, deferring the failure to
        the first attribute access on the result.
    """
    if type_trans == 'complicated':
        tr_x, tr_y = 8, 8
        transformer = Transformer(tr_x, tr_y)
        return transformer
    elif type_trans == 'simple':
        transformer = SimpleTransformer()
        return transformer
    raise ValueError("Unknown transformer type: {!r}".format(type_trans))
class AffineTransformation(object):
    """Composable affine image transform: optional horizontal flip, then an
    (tx, ty) translation, then a multiple-of-90-degree rotation.

    Operates on channel-last images (keras ``channel_axis=2``).
    """
    def __init__(self, flip, tx, ty, k_90_rotate):
        # flip: whether to mirror left-right.
        # tx, ty: translation offsets in pixels (0 means no shift).
        # k_90_rotate: number of counterclockwise 90-degree rotations (0-3).
        self.flip = flip
        self.tx = tx
        self.ty = ty
        self.k_90_rotate = k_90_rotate
    def __call__(self, x):
        """Apply the transform to a single image and return the result."""
        res_x = x
        if self.flip:
            res_x = np.fliplr(res_x)
        if self.tx != 0 or self.ty != 0:
            # Translation with reflected padding at the borders.
            res_x = apply_affine_transform(res_x,
                                           tx=self.tx, ty=self.ty, channel_axis=2, fill_mode='reflect')
        if self.k_90_rotate != 0:
            res_x = np.rot90(res_x, self.k_90_rotate)
        return res_x
class AbstractTransformer(abc.ABC):
    """Base class for transformation sets.

    Subclasses implement ``_create_transformation_list`` to populate the
    list of callables; this class provides counting and batch application.
    """
    def __init__(self):
        self._transformation_list = None
        self._create_transformation_list()
    @property
    def n_transforms(self):
        """Number of transformations in this set."""
        return len(self._transformation_list)
    @abc.abstractmethod
    def _create_transformation_list(self):
        """Populate ``self._transformation_list``; implemented by subclasses."""
        return
    def transform_batch(self, x_batch, t_inds):
        """Apply transformation ``t_inds[i]`` to ``x_batch[i]`` for every i.

        Works on a copy; the input batch is not modified.
        """
        assert len(x_batch) == len(t_inds)
        transformed_batch = x_batch.copy()
        for i, t_ind in enumerate(t_inds):
            transformed_batch[i] = self._transformation_list[t_ind](transformed_batch[i])
        return transformed_batch
class Transformer(AbstractTransformer):
    """Catalogue of 72 affine transformations: {no flip, flip} x three
    horizontal shifts x three vertical shifts x four 90-degree rotations."""

    def __init__(self, translation_x=8, translation_y=8):
        self.max_tx = translation_x
        self.max_ty = translation_y
        super().__init__()

    def _create_transformation_list(self):
        flips = (False, True)
        shifts_x = (0, -self.max_tx, self.max_tx)
        shifts_y = (0, -self.max_ty, self.max_ty)
        rotations = range(4)
        self._transformation_list = [
            AffineTransformation(do_flip, dx, dy, rot)
            for do_flip, dx, dy, rot in itertools.product(
                flips, shifts_x, shifts_y, rotations)
        ]
        return self._transformation_list
class SimpleTransformer(AbstractTransformer):
    """Catalogue of 8 transformations: {no flip, flip} x four 90-degree
    rotations, with no translation."""

    def _create_transformation_list(self):
        self._transformation_list = [
            AffineTransformation(do_flip, 0, 0, rot)
            for do_flip, rot in itertools.product((False, True), range(4))
        ]
        return self._transformation_list
| 2,988 | 33.755814 | 115 | py |
GOAD | GOAD-master/opt_tc_tabular.py | import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import fcnet as model
from sklearn.metrics import precision_recall_fscore_support as prf
def tc_loss(zs, m):
    """Triplet-center style loss over per-transformation embeddings.

    ``zs`` is assumed to be (batch, n_transforms, feat) — as produced by the
    caller after ``permute(0, 2, 1)``.  Each sample's distance to its own
    transformation's center should be at least ``m`` smaller than its
    distance to the nearest *other* center.
    """
    centers = zs.mean(0).unsqueeze(0)
    # dists[b, i, j] = squared distance between embedding (b, i) and center j.
    dists = ((zs.unsqueeze(2) - centers.unsqueeze(1)) ** 2).sum(-1)
    intra = torch.diagonal(dists, dim1=1, dim2=2)
    # Mask the diagonal with a huge constant so min() picks a different center.
    mask = torch.diagflat(torch.ones(zs.size(1))).unsqueeze(0).cuda() * 1e6
    inter = (dists + mask).min(-1)[0]
    return torch.clamp(intra + m - inter, min=0).mean()
def f_score(scores, labels, ratio):
    """Binary F1 after thresholding ``scores`` at their ``ratio``-th
    percentile; samples at or above the threshold are predicted anomalous."""
    cutoff = np.percentile(scores, ratio)
    predictions = (scores >= cutoff).astype(int)
    ground_truth = labels.astype(int)
    _, _, f1, _ = prf(ground_truth, predictions, average='binary')
    return f1
class TransClassifierTabular():
    """GOAD transformation classifier for tabular data.

    Trains a small 1x1-conv network to (a) identify which random
    transformation was applied to each sample (cross-entropy head) and
    (b) keep same-transformation embeddings close to their center
    (``tc_loss``).  At evaluation time, the summed negative log-probability
    of the correct transformation under a softmax over center distances is
    used as the anomaly score.

    Requires a CUDA device (all tensors/models are moved with ``.cuda()``).
    """
    def __init__(self, args):
        self.ds = args.dataset
        self.m = args.m                      # margin for tc_loss
        self.lmbda = args.lmbda              # weight of the tc_loss term
        self.batch_size = args.batch_size
        self.ndf = args.ndf                  # embedding width
        self.n_rots = args.n_rots            # number of random transformations
        self.d_out = args.d_out              # transformed feature dimension
        self.eps = args.eps                  # distance floor used at eval time
        self.n_epoch = args.n_epoch
        # The two small datasets get the shallow single-conv trunk.
        if args.dataset == "thyroid" or args.dataset == "arrhythmia":
            self.netC = model.netC1(self.d_out, self.ndf, self.n_rots).cuda()
        else:
            self.netC = model.netC5(self.d_out, self.ndf, self.n_rots).cuda()
        model.weights_init(self.netC)
        self.optimizerC = optim.Adam(self.netC.parameters(), lr=args.lr, betas=(0.5, 0.999))
    def fit_trans_classifier(self, train_xs, x_test, y_test, ratio):
        """Train for ``n_epoch`` epochs and return the final-epoch F1 score
        on (x_test, y_test), thresholded at the ``ratio``-th percentile."""
        # CE target: transformation slot t of every sample is labeled t.
        labels = torch.arange(self.n_rots).unsqueeze(0).expand((self.batch_size, self.n_rots)).long().cuda()
        celoss = nn.CrossEntropyLoss()
        print('Training')
        for epoch in range(self.n_epoch):
            self.netC.train()
            rp = np.random.permutation(len(train_xs))
            n_batch = 0
            # Running sum of per-transformation embedding means; averaged
            # into the scoring centers after the epoch.
            sum_zs = torch.zeros((self.ndf, self.n_rots)).cuda()
            for i in range(0, len(train_xs), self.batch_size):
                self.netC.zero_grad()
                batch_range = min(self.batch_size, len(train_xs) - i)
                train_labels = labels
                # Final (possibly short) batch: rebuild labels to exact size.
                if batch_range == len(train_xs) - i:
                    train_labels = torch.arange(self.n_rots).unsqueeze(0).expand((len(train_xs) - i, self.n_rots)).long().cuda()
                idx = np.arange(batch_range) + i
                xs = torch.from_numpy(train_xs[rp[idx]]).float().cuda()
                tc_zs, ce_zs = self.netC(xs)
                sum_zs = sum_zs + tc_zs.mean(0)
                # (batch, ndf, n_rots) -> (batch, n_rots, ndf) for tc_loss.
                tc_zs = tc_zs.permute(0, 2, 1)
                loss_ce = celoss(ce_zs, train_labels)
                er = self.lmbda * tc_loss(tc_zs, self.m) + loss_ce
                er.backward()
                self.optimizerC.step()
                n_batch += 1
            # Average the accumulated means into centers of shape (1, n_rots, ndf).
            means = sum_zs.t() / n_batch
            means = means.unsqueeze(0)
            self.netC.eval()
            with torch.no_grad():
                val_probs_rots = np.zeros((len(y_test), self.n_rots))
                for i in range(0, len(x_test), self.batch_size):
                    batch_range = min(self.batch_size, len(x_test) - i)
                    idx = np.arange(batch_range) + i
                    xs = torch.from_numpy(x_test[idx]).float().cuda()
                    zs, fs = self.netC(xs)
                    zs = zs.permute(0, 2, 1)
                    diffs = ((zs.unsqueeze(2) - means) ** 2).sum(-1)
                    # Floor the distances at eps before the softmax.
                    diffs_eps = self.eps * torch.ones_like(diffs)
                    diffs = torch.max(diffs, diffs_eps)
                    logp_sz = torch.nn.functional.log_softmax(-diffs, dim=2)
                    # Anomaly evidence per transformation: -log p of the
                    # correct (diagonal) transformation.
                    val_probs_rots[idx] = -torch.diagonal(logp_sz, 0, 1, 2).cpu().data.numpy()
                val_probs_rots = val_probs_rots.sum(1)
                f1_score = f_score(val_probs_rots, y_test, ratio)
                print("Epoch:", epoch, ", fscore: ", f1_score)
        return f1_score
| 3,968 | 39.5 | 128 | py |
GOAD | GOAD-master/fcnet.py | import torch.nn as nn
import torch.nn.init as init
import numpy as np
def weights_init(m):
    """Initialize a module's weights in place.

    ``nn.Linear`` and Conv-named layers get Xavier-normal weights with gain
    sqrt(2); modules whose class name contains 'Emb' get N(0, 0.01) weights.

    Note: the ``classname.find('Linear')`` branch below is unreachable for
    ``nn.Linear`` instances, because the ``isinstance`` check above already
    handles them; it only fires for custom classes named like 'Linear'.
    """
    classname = m.__class__.__name__
    if isinstance(m, nn.Linear):
        init.xavier_normal_(m.weight, gain=np.sqrt(2.0))
    elif classname.find('Conv') != -1:
        init.xavier_normal_(m.weight, gain=np.sqrt(2.0))
    elif classname.find('Linear') != -1:
        init.eye_(m.weight)
    elif classname.find('Emb') != -1:
        # Fixed: use the in-place ``normal_`` — ``init.normal`` is a
        # deprecated alias that warns on modern PyTorch.
        init.normal_(m.weight, mean=0, std=0.01)
class netC5(nn.Module):
    """Five-layer 1x1-conv trunk followed by a classification head.

    ``forward`` returns the trunk output (used for the triplet-center loss)
    and the head logits (used by the transformation classifier).
    """
    def __init__(self, d, ndf, nc):
        super(netC5, self).__init__()
        # conv(d -> ndf) followed by four (LeakyReLU, conv(ndf -> ndf)) pairs.
        trunk_layers = [nn.Conv1d(d, ndf, kernel_size=1, bias=False)]
        for _ in range(4):
            trunk_layers.append(nn.LeakyReLU(0.2, inplace=True))
            trunk_layers.append(nn.Conv1d(ndf, ndf, kernel_size=1, bias=False))
        self.trunk = nn.Sequential(*trunk_layers)
        self.head = nn.Sequential(
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv1d(ndf, nc, kernel_size=1, bias=True),
        )
    def forward(self, input):
        embedding = self.trunk(input)
        logits = self.head(embedding)
        return embedding, logits
class netC1(nn.Module):
    """Single 1x1-conv trunk with a classification head — the shallow
    variant of ``netC5`` used for the small tabular datasets."""
    def __init__(self, d, ndf, nc):
        super(netC1, self).__init__()
        self.trunk = nn.Sequential(
            nn.Conv1d(d, ndf, kernel_size=1, bias=False),
        )
        self.head = nn.Sequential(
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv1d(ndf, nc, kernel_size=1, bias=True),
        )
    def forward(self, input):
        embedding = self.trunk(input)
        logits = self.head(embedding)
        return embedding, logits
GOAD | GOAD-master/wideresnet.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
# The code is adapted from https://github.com/xternalz/WideResNet-pytorch/blob/master/wideresnet.py
class BasicBlock(nn.Module):
    """Pre-activation WideResNet basic block (BN -> ReLU -> conv), adapted
    from https://github.com/xternalz/WideResNet-pytorch.

    NOTE(review): unlike the upstream block, the line marked ``# todo`` in
    ``forward`` overwrites the conv1/bn2/relu2 result with a *second*
    application of ``conv1`` to the pre-bn2 activation, so bn2/relu2 are
    effectively skipped.  It is flagged ``todo`` and so looks deliberate —
    confirm against the GOAD repo before changing it.
    """
    def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
        super(BasicBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_planes)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,
                               padding=1, bias=False)
        self.droprate = dropRate
        # When in/out widths match the residual shortcut is the identity;
        # otherwise a strided 1x1 conv matches the shapes.
        self.equalInOut = (in_planes == out_planes)
        self.convShortcut = (not self.equalInOut) and nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
                                                                padding=0, bias=False) or None
    def forward(self, x):
        if not self.equalInOut:
            # Shared pre-activation: the shortcut also sees relu1(bn1(x)).
            x = self.relu1(self.bn1(x))
        else:
            out = self.relu1(self.bn1(x))
        out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))
        # NOTE(review): overwrites the line above — see class docstring.
        out = self.conv1(out if self.equalInOut else x) # todo
        if self.droprate > 0:
            out = F.dropout(out, p=self.droprate, training=self.training)
        out = self.conv2(out)
        return torch.add(x if self.equalInOut else self.convShortcut(x), out)
class NetworkBlock(nn.Module):
    """A stack of ``nb_layers`` residual blocks.

    The first block maps ``in_planes`` -> ``out_planes`` with the given
    ``stride``; all subsequent blocks are ``out_planes`` -> ``out_planes``
    with stride 1.
    """
    def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0):
        super(NetworkBlock, self).__init__()
        self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)
    def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):
        layers = []
        # int(nb_layers): callers pass the float (depth - 4) / 6.
        for i in range(int(nb_layers)):
            # Fixed: use conditional expressions instead of the fragile
            # ``cond and a or b`` idiom, which silently yields the wrong
            # value whenever the first operand is falsy (e.g. 0 planes).
            layers.append(block(in_planes if i == 0 else out_planes,
                                out_planes,
                                stride if i == 0 else 1,
                                dropRate))
        return nn.Sequential(*layers)
    def forward(self, x):
        return self.layer(x)
class WideResNet(nn.Module):
    """WideResNet backbone (depth must satisfy depth = 6n + 4).

    ``forward`` returns both the pooled feature vector and the classifier
    logits: ``(act, fs)``.
    """
    def __init__(self, depth, num_classes, widen_factor=1, dropRate=0.0):
        super(WideResNet, self).__init__()
        nChannels = [16, 16*widen_factor, 32*widen_factor, 64*widen_factor]
        # nChannels = [16, 16*widen_factor, 32*widen_factor, 32*widen_factor]
        assert((depth - 4) % 6 == 0)
        n = (depth - 4) / 6
        block = BasicBlock
        # 1st conv before any network block
        self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1,
                               padding=1, bias=False)
        # 1st block
        self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
        # 2nd block
        self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)
        # 3rd block
        self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)
        # global average pooling and classifier
        self.bn1 = nn.BatchNorm2d(nChannels[3])
        self.relu = nn.ReLU(inplace=True)
        self.fc = nn.Linear(nChannels[3], num_classes) # todo
        # self.fout = nn.Linear(ndf, num_classes)
        self.nChannels = nChannels[3]
        # self.softmax = nn.Softmax()
        # He-style init for convs; note `n` is reused here as fan-out count,
        # shadowing the layer count above (harmless — blocks already built).
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()
    def forward(self, x):
        out = self.conv1(x)
        out = self.block1(out)
        out = self.block2(out)
        out = self.block3(out)
        out = self.relu(self.bn1(out))
        # avg_pool2d with window 8 — assumes an 8x8 feature map at this
        # point (e.g. 32x32 inputs); confirm for other input sizes.
        out = F.avg_pool2d(out, 8)
        act = out.view(-1, self.nChannels)
        fs = self.fc(act)
        # out = F.log_softmax(out, dim=1)
        return act, fs
CorgiPile-PyTorch | CorgiPile-PyTorch-main/nlp_dl_bench/test.py | import time
import os
from tqdm import tqdm
import torch
from torch import nn
from torch.utils.data import DataLoader
from datasets import load_data
from utils import AverageMeter, load_checkpoint, parse_opt
# Run on GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def test(model: nn.Module, model_name: str, test_loader: DataLoader) -> None:
    """Evaluate ``model`` on ``test_loader`` and print the overall accuracy.

    Parameters
    ----------
    model : nn.Module
        Trained classifier; expected to already be on ``device`` and in
        eval mode (see the ``__main__`` block).
    model_name : str
        Architecture name; 'han' batches are unpacked differently because
        they carry per-document sentence counts.
    test_loader : DataLoader
        Batches of the test split.
    """
    # track metrics
    accs = AverageMeter() # accuracies
    # evaluate in batches
    with torch.no_grad():
        for i, batch in enumerate(tqdm(test_loader, desc = 'Evaluating')):
            if model_name in ['han']:
                documents, sentences_per_document, words_per_sentence, labels = batch
                documents = documents.to(device)  # (batch_size, sentence_limit, word_limit)
                sentences_per_document = sentences_per_document.squeeze(1).to(device)  # (batch_size)
                words_per_sentence = words_per_sentence.to(device)  # (batch_size, sentence_limit)
                labels = labels.squeeze(1).to(device)  # (batch_size)
                # forward
                scores, word_alphas, sentence_alphas = model(
                    documents,
                    sentences_per_document,
                    words_per_sentence
                )  # (n_documents, n_classes), (n_documents, max_doc_len_in_batch, max_sent_len_in_batch), (n_documents, max_doc_len_in_batch)
            else:
                sentences, words_per_sentence, labels = batch
                sentences = sentences.to(device)  # (batch_size, word_limit)
                words_per_sentence = words_per_sentence.squeeze(1).to(device)  # (batch_size)
                labels = labels.squeeze(1).to(device)  # (batch_size)
                # for torchtext
                # sentences = batch.text[0].to(device)  # (batch_size, word_limit)
                # words_per_sentence = batch.text[1].to(device)  # (batch_size)
                # labels = batch.label.to(device)  # (batch_size)
                scores = model(sentences, words_per_sentence)  # (batch_size, n_classes)
            # accuracy
            _, predictions = scores.max(dim=1)  # (n_documents)
            correct_predictions = torch.eq(predictions, labels).sum().item()
            accuracy = correct_predictions / labels.size(0)
            # keep track of metrics (weighted by batch size)
            accs.update(accuracy, labels.size(0))
        # final test accuracy
        print('\n * TEST ACCURACY - %.1f percent\n' % (accs.avg * 100))
if __name__ == '__main__':
    config = parse_opt()
    # load model from the configured checkpoint file
    checkpoint_path = os.path.join(config.checkpoint_path, config.checkpoint_basename + '.pth.tar')
    model, _, _, _, _, _ = load_checkpoint(checkpoint_path, device)
    model = model.to(device)
    model.eval()
    # load test data and evaluate
    test_loader = load_data(config, 'test')
    test(model, config.model_name, test_loader)
CorgiPile-PyTorch | CorgiPile-PyTorch-main/nlp_dl_bench/nlp_dl_bench_train.py | import os
# Pin training to GPU 1; must be set before torch initializes CUDA.
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
import torch
import torch.backends.cudnn as cudnn
from torch import optim, nn
import time
import random
import models
from trainer import Trainer
from datasets import load_data
from utils import load_embeddings, load_checkpoint, parse_opt
def set_trainer(config, args):
    """Build a :class:`Trainer` from ``config`` (parsed options) and ``args``
    (the experiment dictionary assembled in ``__main__``).

    NOTE(review): this function reads the module-level globals
    ``use_train_accuracy`` and ``use_sgd``, which are only defined inside the
    ``__main__`` block — calling it from another module would raise
    NameError.

    NOTE(review): on the checkpoint-resume branch ``test_loader``,
    ``test_tuple_num`` (and the loaders passed to Trainer) are never
    assigned, so resuming would crash below — confirm this path is unused.
    """
    model_name = args['model_name']
    batch_size = args['batch_size']
    data_name = args['data_name']
    learning_rate = args['learning_rate']
    iter_num = args['iter_num']
    lr_decay = args['lr_decay']
    args['use_train_accuracy'] = use_train_accuracy
    args['use_sgd'] = use_sgd
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    cudnn.benchmark = True # set to true only if inputs to model are fixed size; otherwise lot of computational overhead
    # load a checkpoint
    if config.checkpoint is not None:
        # load data
        train_loader = load_data(config, 'train', False)
        model, optimizer, word_map, start_epoch = load_checkpoint(config.checkpoint, device)
        print('\nLoaded checkpoint from epoch %d.\n' % (start_epoch - 1))
    # or initialize model
    else:
        start_epoch = 0
        # load data
        train_loader, test_loader, embeddings, emb_size, word_map, n_classes, vocab_size, train_tuple_num = load_data(args, config, 'train', True)
        test_tuple_num = train_tuple_num
        # When reporting test accuracy, replace the eval loader with the
        # actual test split.
        if (use_train_accuracy == False):
            test_loader, test_tuple_num = load_data(args, config, 'test', True)
        model = models.make(
            config = config,
            n_classes = n_classes,
            vocab_size = vocab_size,
            embeddings = embeddings,
            emb_size = emb_size
        )
        if (use_sgd == True):
            optimizer = optim.SGD(
                params = filter(lambda p: p.requires_grad, model.parameters()),
                lr = learning_rate
            )
        else:
            optimizer = optim.Adam(
                params = filter(lambda p: p.requires_grad, model.parameters()),
                lr = learning_rate
            )
    # loss functions
    loss_function = nn.CrossEntropyLoss()
    # move to device
    model = model.to(device)
    loss_function = loss_function.to(device)
    trainer = Trainer(
        num_epochs = iter_num,
        start_epoch = start_epoch,
        train_loader = train_loader,
        test_loader = test_loader,
        train_tuple_num = test_tuple_num,
        model = model,
        model_name = model_name,
        loss_function = loss_function,
        optimizer = optimizer,
        lr_decay = lr_decay,
        dataset_name = data_name,
        word_map = word_map,
        grad_clip = config.grad_clip,
        print_freq = config.print_freq,
        checkpoint_path = config.checkpoint_path,
        checkpoint_basename = config.checkpoint_basename,
        tensorboard = config.tensorboard,
        log_dir = config.log_dir
    )
    return trainer
def get_current_time_filename():
    """Current local time formatted for file names: 'YYYY-MM-DD-HH-MM-SS'."""
    now = time.localtime()
    return time.strftime("%Y-%m-%d-%H-%M-%S", now)
if __name__ == '__main__':
    # Experiment driver: runs every shuffle mode once (bismarck_mrs once per
    # old-buffer select ratio) and logs each run to a timestamped text file.
    # batch_size: 128 # batch size
    # lr: 0.001 # learning rate
    # lr_decay: 0.3 # a factor to multiply learning rate with (0, 1)
    # workers: 1 # number of workers for loading data in the DataLoader
    # num_epochs: 5 # number of epochs to run
    base_dir = '/mnt/ds3lab-scratch/xuliji/code/CorgiPile-PyTorch'
    log_dir = 'train_log_nlp_sgd'
    # model_name = 'han'
    model_name = 'textcnn'
    data_name = 'yelp_review_full'
    use_clustered_data = True
    use_train_accuracy = False # If False, it will compute and output test accuracy instead of train accuracy
    use_sgd = True # If false, it will use Adam instead of SGD
    #batch_size = 256
    batch_size = 128
    iter_num = 3
    num_workers = 1
    lr_decay = 0.95
    shuffle_modes = ['once_shuffle', 'block', 'sliding_window', 'bismarck_mrs', 'no_shuffle', 'block_only']
    n_records = 0
    # NOTE(review): for any other data_name, learning_rate and block_num are
    # never assigned and the script crashes below with NameError.
    if (data_name == 'yelp_review_full'):
        if (model_name == 'han'):
            learning_rate = 0.001
        elif (model_name == 'textcnn'):
            learning_rate = 0.001
        block_num = 650
    else:
        print ('Error in the data_name')
    args = {}
    args['use_clustered_data'] = use_clustered_data
    args['use_train_accuracy'] = use_train_accuracy
    args['use_sgd'] = use_sgd
    args['model_name'] = model_name
    args['batch_size'] = batch_size
    args['iter_num'] = iter_num
    args['n_records'] = n_records
    args['learning_rate'] = learning_rate
    args['num_workers'] = num_workers
    args['data_name'] = data_name
    args['lr_decay'] = lr_decay
    # for our-block-based sgd
    buffer_size_ratio = 0.1
    # for sliding_window
    sliding_window_size_ratio = 0.1
    # for bismarck_mrs
    bismarck_buffer_size_ratio = 0.1
    select_ratio_from_old_buffers = [0.4, 0.5]
    args['block_num'] = block_num
    args['buffer_size_ratio'] = buffer_size_ratio
    args['sliding_window_size_ratio'] = sliding_window_size_ratio
    args['bismarck_buffer_size_ratio'] = bismarck_buffer_size_ratio
    # Random suffix keeps concurrent runs from sharing a checkpoint dir.
    args['old_buffer_checkpoint_dir'] = '/mnt/ds3lab-scratch/xuliji/code/CorgiPile-PyTorch/checkpoint/' + get_current_time_filename() + str(random.randint(1,100))
    for shuffle_mode in shuffle_modes:
        args['shuffle_mode'] = shuffle_mode
        if (shuffle_mode == 'bismarck_mrs'):
            # bismarck_mrs is swept over the old-buffer selection ratios.
            for ratio in select_ratio_from_old_buffers:
                args['select_ratio_from_old_buffer'] = ratio
                log_txt = shuffle_mode + '_' + data_name + '_lr' + str(learning_rate) + '_ratio_' + str(ratio) + '_' + get_current_time_filename() + '.txt'
                outdir = os.path.join(base_dir, log_dir, data_name, model_name, 'sgd-bs' + str(batch_size), shuffle_mode)
                log_file = os.path.join(outdir, log_txt)
                args['log_file'] = log_file
                if not os.path.exists(outdir):
                    os.makedirs(outdir)
                config = parse_opt(data_name, model_name)
                trainer = set_trainer(config, args)
                trainer.run_train(args)
        else:
            log_txt = shuffle_mode + '_' + data_name + '_lr' + str(learning_rate) + '_' + get_current_time_filename() + '.txt'
            outdir = os.path.join(base_dir, log_dir, data_name, model_name, 'sgd-bs' + str(batch_size), shuffle_mode)
            log_file = os.path.join(outdir, log_txt)
            args['log_file'] = log_file
            if not os.path.exists(outdir):
                os.makedirs(outdir)
            config = parse_opt(data_name, model_name)
            trainer = set_trainer(config, args)
            trainer.run_train(args)
| 6,811 | 31.438095 | 162 | py |
CorgiPile-PyTorch | CorgiPile-PyTorch-main/nlp_dl_bench/classify.py | import os
import json
from nltk.tokenize import PunktSentenceTokenizer, TreebankWordTokenizer
from typing import Tuple, Dict
import torch
from torch import nn
from datasets import get_clean_text, get_label_map, load_data
from utils import *
# Run on GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# path to the checkpoint to load in __main__
checkpoint_path = '/Users/zou/Renovamen/Developing/Text-Classification/checkpoints/checkpoint_fasttext_agnews.pth.tar'
# pad limits
# only makes sense when model_name = 'han' (hierarchical doc representation)
sentence_limit_per_doc = 15
word_limit_per_sentence = 20
# only makes sense when model_name != 'han' (flat sentence representation)
word_limit = 200
def prepro_doc(
    text: str, word_map: Dict[str, int]
) -> Tuple[torch.LongTensor, torch.LongTensor, torch.LongTensor]:
    """
    Preprocess a document into a hierarchial representation.

    The document is split into sentences (capped at
    ``sentence_limit_per_doc``), each sentence into words (capped at
    ``word_limit_per_sentence``); words are mapped through ``word_map``
    (unknown words to ``word_map['<unk>']``) and zero-padded.

    Parameters
    ----------
    text : str
        A document in text form
    word_map : Dict[str, int]
        Word2ix map

    Returns
    -------
    encoded_doc : torch.LongTensor
        Pre-processed tokenized document, (1, sentence_limit, word_limit)
    sentences_per_doc : torch.LongTensor
        Document lengths, (1)
    words_per_each_sentence : torch.LongTensor
        Sentence lengths, (1, n_sentences)
    """
    # tokenizers
    sent_tokenizer = PunktSentenceTokenizer()
    word_tokenizer = TreebankWordTokenizer()
    # tokenize document into sentences
    sentences = []
    for paragraph in get_clean_text(text).splitlines():
        sentences.extend(sent_tokenizer.tokenize(paragraph))
    # tokenize sentences into words, dropping empty sentences
    doc = []
    for s in sentences[:sentence_limit_per_doc]:
        w = word_tokenizer.tokenize(s)[:word_limit_per_sentence]
        if len(w) == 0:
            continue
        doc.append(w)
    # number of sentences in the document
    sentences_per_doc = torch.LongTensor([len(doc)]).to(device)  # (1)
    # number of words in each sentence
    words_per_each_sentence = torch.LongTensor(
        [len(s) for s in doc]
    ).unsqueeze(0).to(device)  # (1, n_sentences)
    # encode words, pad each sentence to word_limit_per_sentence, then pad
    # the document with all-zero sentences up to sentence_limit_per_doc
    # (comprehensions replace the original nested map/lambda chains)
    unk = word_map['<unk>']
    encoded_doc = [
        [word_map.get(w, unk) for w in s] + [0] * (word_limit_per_sentence - len(s))
        for s in doc
    ] + [[0] * word_limit_per_sentence] * (sentence_limit_per_doc - len(doc))
    encoded_doc = torch.LongTensor(encoded_doc).unsqueeze(0).to(device)
    return encoded_doc, sentences_per_doc, words_per_each_sentence
def prepro_sent(
    text: str, word_map: Dict[str, int]
) -> Tuple[torch.LongTensor, torch.LongTensor]:
    """
    Preprocess a sentence.

    The text is tokenized into at most ``word_limit`` words; words are
    mapped through ``word_map`` (unknown words to ``word_map['<unk>']``)
    and zero-padded to ``word_limit``.

    Parameters
    ----------
    text : str
        A sentence in text form
    word_map : Dict[str, int]
        Word2ix map

    Returns
    -------
    encoded_sent : torch.LongTensor
        Pre-processed tokenized sentence, (1, word_limit)
    words_per_sentence : torch.LongTensor
        Sentence lengths, (1)
    """
    # tokenize sentence into words
    word_tokenizer = TreebankWordTokenizer()
    sentence = word_tokenizer.tokenize(text)[:word_limit]
    # number of words in sentence
    words_per_sentence = torch.LongTensor([len(sentence)]).to(device)  # (1)
    # encode sentence with indices from the word map and zero-pad
    # (comprehension replaces the original map/lambda chain)
    unk = word_map['<unk>']
    encoded_sent = [word_map.get(w, unk) for w in sentence] + [0] * (word_limit - len(sentence))
    encoded_sent = torch.LongTensor(encoded_sent).unsqueeze(0).to(device)
    return encoded_sent, words_per_sentence
def classify(
    text: str, model: nn.Module, model_name: str, dataset_name: str, word_map: Dict[str, int]
) -> str:
    """
    Classify a text using the given model.

    Parameters
    ----------
    text : str
        A document or sentence in text form
    model : nn.Module
        A loaded model
    model_name : str
        Name of the model ('han' uses the hierarchical document
        representation; everything else the flat sentence one)
    dataset_name : str
        Name of the dataset (determines the label map)
    word_map : Dict[str, int]
        Word2ix map

    Returns
    -------
    prediction : str
        The predicted category with its probability
    """
    _, rev_label_map = get_label_map(dataset_name)
    if model_name in ['han']:
        # preprocess document
        encoded_doc, sentences_per_doc, words_per_each_sentence = prepro_doc(text, word_map)
        # run through model
        scores, word_alphas, sentence_alphas = model(
            encoded_doc,
            sentences_per_doc,
            words_per_each_sentence
        ) # (1, n_classes), (1, n_sentences, max_sent_len_in_document), (1, n_sentences)
    else:
        # preprocess sentence
        encoded_sent, words_per_sentence = prepro_sent(text, word_map)
        # run through model
        scores = model(encoded_sent, words_per_sentence)
    scores = scores.squeeze(0) # (n_classes)
    scores = nn.functional.softmax(scores, dim=0) # (n_classes)
    # find best prediction and its probability
    score, prediction = scores.max(dim=0)
    prediction = 'Category: {category}, Probability: {score:.2f}%'.format(
        category = rev_label_map[prediction.item()],
        score = score.item() * 100
    )
    return prediction
    # The lines below are intentionally unreachable: kept from the original
    # attention-visualization variant of this function.
    # word_alphas = word_alphas.squeeze(0)  # (n_sentences, max_sent_len_in_document)
    # sentence_alphas = sentence_alphas.squeeze(0)  # (n_sentences)
    # words_per_each_sentence = words_per_each_sentence.squeeze(0)  # (n_sentences)
    # return doc, scores, word_alphas, sentence_alphas, words_per_each_sentence
if __name__ == '__main__':
    # Sample input; the alternatives below were used during development.
    text = 'How do computers work? I have a CPU I want to use. But my keyboard and motherboard do not help.\n\n You can just google how computers work. Honestly, its easy.'
    # text = 'But think about it! It\'s so cool. Physics is really all about math. what feynman said, hehe'
    # text = "I think I'm falling sick. There was some indigestion at first. But now a fever is beginning to take hold."
    # text = "I want to tell you something important. Get into the stock market and investment funds. Make some money so you can buy yourself some yogurt."
    # text = "You know what's wrong with this country? republicans and democrats. always at each other's throats\n There's no respect, no bipartisanship."
    # load model and word map
    model, model_name, _, dataset_name, word_map, _ = load_checkpoint(checkpoint_path, device)
    model = model.to(device)
    model.eval()
    # visualize_attention(*classify(text, model, model_name, dataset_name, word_map))
    prediction = classify(text, model, model_name, dataset_name, word_map)
    print(prediction)
CorgiPile-PyTorch | CorgiPile-PyTorch-main/nlp_dl_bench/trainer/trainer.py | import time
from typing import Optional, Dict
import torch
from torch import nn, optim
from torch.utils.data import DataLoader
import os
import torch.backends.cudnn as cudnn
from tqdm import tqdm
from utils import TensorboardWriter, AverageMeter, save_checkpoint, \
clip_gradient, adjust_learning_rate
def get_current_time():
    """Current local time as a human-readable 'YYYY-MM-DD HH:MM:SS' string."""
    return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
class Trainer:
"""
Training pipeline
Parameters
----------
num_epochs : int
We should train the model for __ epochs
start_epoch : int
We should start training the model from __th epoch
train_loader : DataLoader
DataLoader for training data
model : nn.Module
Model
model_name : str
Name of the model
loss_function : nn.Module
Loss function (cross entropy)
optimizer : optim.Optimizer
Optimizer (Adam)
lr_decay : float
A factor in interval (0, 1) to multiply the learning rate with
dataset_name : str
Name of the dataset
word_map : Dict[str, int]
Word2id map
grad_clip : float, optional
Gradient threshold in clip gradients
print_freq : int
Print training status every __ batches
checkpoint_path : str, optional
Path to the folder to save checkpoints
checkpoint_basename : str, optional, default='checkpoint'
Basename of the checkpoint
tensorboard : bool, optional, default=False
Enable tensorboard or not?
log_dir : str, optional
Path to the folder to save logs for tensorboard
"""
def __init__(
self,
num_epochs: int,
start_epoch: int,
train_loader: DataLoader,
test_loader: DataLoader,
train_tuple_num: int,
model: nn.Module,
model_name: str,
loss_function: nn.Module,
optimizer,
lr_decay: float,
dataset_name: str,
word_map: Dict[str, int],
grad_clip = Optional[None],
print_freq: int = 100,
checkpoint_path: Optional[str] = None,
checkpoint_basename: str = 'checkpoint',
tensorboard: bool = False,
log_dir: Optional[str] = None
) -> None:
self.num_epochs = num_epochs
self.start_epoch = start_epoch
self.train_loader = train_loader
self.test_loader = test_loader
self.model = model
self.model_name = model_name
self.loss_function = loss_function
self.optimizer = optimizer
self.lr_decay = lr_decay
self.dataset_name = dataset_name
self.word_map = word_map
self.print_freq = print_freq
self.grad_clip = grad_clip
self.checkpoint_path = checkpoint_path
self.checkpoint_basename = checkpoint_basename
# setup visualization writer instance
# self.writer = TensorboardWriter(log_dir, tensorboard)
self.len_epoch = len(self.train_loader)
self.train_tuple_num = train_tuple_num
self.accuracy = 0.0
self.total_loss = 0.0
def clear_loss_accuracy(self):
self.accuracy = 0.0
self.total_loss = 0.0
    def train(self, epoch: int) -> tuple:
        """
        Train one epoch.

        Parameters
        ----------
        epoch : int
            Current number of epoch

        Returns
        -------
        (start, grad_end) : tuple of float
            Wall-clock timestamps taken before the first batch and after
            the last optimizer step; used by ``run_train`` for timing.
        """
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        # NOTE(review): ``device`` is a torch.device, compared against the
        # string 'cuda' — relies on torch's device/string equality; verify.
        if device == 'cuda':
            cudnn.benchmark = True # set to true only if inputs to model are fixed size; otherwise lot of computational overhead
        start = time.time()
        self.model.train() # training mode enables dropout
        # batch_time = AverageMeter() # forward prop. + back prop. time per batch
        # data_time = AverageMeter() # data loading time per batch
        # losses = AverageMeter(tag='loss', writer=self.writer) # cross entropy loss
        # accs = AverageMeter(tag='acc', writer=self.writer) # accuracies
        # batches
        for i, batch in enumerate(self.train_loader):
            # data_time.update(time.time() - start)
            if self.model_name in ['han']:
                documents, sentences_per_document, words_per_sentence, labels = batch
                documents = documents.to(device)  # (batch_size, sentence_limit, word_limit)
                sentences_per_document = sentences_per_document.squeeze(1).to(device)  # (batch_size)
                words_per_sentence = words_per_sentence.to(device)  # (batch_size, sentence_limit)
                labels = labels.squeeze(1).to(device)  # (batch_size)
                # forward
                scores, _, _ = self.model(
                    documents,
                    sentences_per_document,
                    words_per_sentence
                )  # (n_documents, n_classes), (n_documents, max_doc_len_in_batch, max_sent_len_in_batch), (n_documents, max_doc_len_in_batch)
            else:
                sentences, words_per_sentence, labels = batch
                sentences = sentences.to(device)  # (batch_size, word_limit)
                words_per_sentence = words_per_sentence.squeeze(1).to(device)  # (batch_size)
                labels = labels.squeeze(1).to(device)  # (batch_size)
                # for torchtext
                # sentences = batch.text[0].to(device)  # (batch_size, word_limit)
                # words_per_sentence = batch.text[1].to(device)  # (batch_size)
                # labels = batch.label.to(device)  # (batch_size)
                scores = self.model(sentences, words_per_sentence)  # (batch_size, n_classes)
            # calc loss
            loss = self.loss_function(scores, labels)  # scalar
            # backward
            self.optimizer.zero_grad()
            loss.backward()
            # clip gradients
            if self.grad_clip is not None:
                clip_gradient(self.optimizer, self.grad_clip)
            # update weights
            self.optimizer.step()
            # if (i % 100 == 0):
            #     print (get_current_time(), i)
            # find accuracy
            # _, predictions = scores.max(dim = 1)  # (n_documents)
            # correct_predictions = torch.eq(predictions, labels).sum().item()
            # accuracy = correct_predictions / labels.size(0)
            # set step for tensorboard
            # step = (epoch - 1) * self.len_epoch + i
            # self.writer.set_step(step=step, mode='train')
            # keep track of metrics
            # batch_time.update(time.time() - start)
            # losses.update(loss.item(), labels.size(0))
            # accs.update(accuracy, labels.size(0))
            # start = time.time()
            # print training status
            # if i % self.print_freq == 0:
            #     print(
            #         'Epoch: [{0}][{1}/{2}]\t'
            #         'Batch Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
            #         'Data Load Time {data_time.val:.3f} ({data_time.avg:.3f})\t'
            #         'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
            #         'Accuracy {acc.val:.3f} ({acc.avg:.3f})'.format(
            #             epoch, i, len(self.train_loader),
            #             batch_time = batch_time,
            #             data_time = data_time,
            #             loss = losses,
            #             acc = accs
            #         )
            #     )
        grad_end = time.time()
        return (start, grad_end)
    def test(self, model: nn.Module, model_name: str, test_loader: DataLoader) -> tuple:
        """
        Evaluate ``model`` on the trainer's eval loader, accumulating into
        ``self.total_loss`` / ``self.accuracy`` (call ``clear_loss_accuracy``
        first).

        NOTE(review): the ``test_loader`` parameter is ignored — the loop
        iterates ``self.test_loader`` instead.  ``self.accuracy`` accumulates
        the *count* of correct predictions, not a ratio; ``run_train``
        divides it by the tuple count.

        Returns
        -------
        (total_loss, accuracy, loss_end) : tuple
            Accumulated loss tensor, accumulated correct count and a
            wall-clock timestamp taken after evaluation.
        """
        # track metrics
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        # NOTE(review): torch.device compared against the string 'cuda';
        # relies on torch's device/string equality — verify.
        if device == 'cuda':
            cudnn.benchmark = True # set to true only if inputs to model are fixed size; otherwise lot of computational overhead
        # accs = AverageMeter() # accuracies
        # evaluate in batches
        with torch.no_grad():
            for i, batch in enumerate(self.test_loader):
                if model_name in ['han']:
                    documents, sentences_per_document, words_per_sentence, labels = batch
                    documents = documents.to(device)  # (batch_size, sentence_limit, word_limit)
                    sentences_per_document = sentences_per_document.squeeze(1).to(device)  # (batch_size)
                    words_per_sentence = words_per_sentence.to(device)  # (batch_size, sentence_limit)
                    labels = labels.squeeze(1).to(device)  # (batch_size)
                    # forward
                    scores, word_alphas, sentence_alphas = model(
                        documents,
                        sentences_per_document,
                        words_per_sentence
                    )  # (n_documents, n_classes), (n_documents, max_doc_len_in_batch, max_sent_len_in_batch), (n_documents, max_doc_len_in_batch)
                else:
                    sentences, words_per_sentence, labels = batch
                    sentences = sentences.to(device)  # (batch_size, word_limit)
                    words_per_sentence = words_per_sentence.squeeze(1).to(device)  # (batch_size)
                    labels = labels.squeeze(1).to(device)  # (batch_size)
                    # for torchtext
                    # sentences = batch.text[0].to(device)  # (batch_size, word_limit)
                    # words_per_sentence = batch.text[1].to(device)  # (batch_size)
                    # labels = batch.label.to(device)  # (batch_size)
                    scores = model(sentences, words_per_sentence)  # (batch_size, n_classes)
                # accuracy
                _, predictions = scores.max(dim=1)  # (n_documents)
                correct_predictions = torch.eq(predictions, labels).sum().item()
                # accuracy = correct_predictions / labels.size(0)
                self.accuracy += correct_predictions
                loss = self.loss_function(scores, labels)  # scalar
                self.total_loss += loss
                # keep track of metrics
                # accs.update(accuracy, labels.size(0))
        # final test accuracy
        # print('\n * TEST ACCURACY - %.1f percent\n' % (accs.avg * 100))
        loss_end = time.time()
        return (self.total_loss, self.accuracy, loss_end)
def run_train(self, args):
avg_exec_t = 0.0
avg_grad_t = 0.0
avg_loss_t = 0.0
first_exec_t = 0.0
first_grad_t = 0.0
first_loss_t = 0.0
second_exec_t = 0.0
second_grad_t = 0.0
second_loss_t = 0.0
max_accuracy = 0.0
log_file = args['log_file']
writer = open(log_file, 'w')
for k in args:
writer.write("[params] " + str(k) + " = " + str(args[k]) + "\n")
writer.flush()
writer.write('[Computed] train_tuple_num = %d' % (self.train_tuple_num))
writer.write('\n')
writer.write('[%s] Start iteration' % get_current_time())
writer.write('\n')
iter_num = self.num_epochs
# epochs
for epoch in range(0, self.num_epochs):
# trian an epoch
self.clear_loss_accuracy()
(start, grad_end) = self.train(epoch=epoch)
# time per epoch
# epoch_time = time.time() - start
# print('Epoch: [{0}] finished, time consumed: {epoch_time:.3f}'.format(epoch, epoch_time=epoch_time))
(iter_loss, iter_acc, loss_end) = self.test(self.model, self.model_name, self.test_loader)
# decay learning rate every epoch
adjust_learning_rate(self.optimizer, self.lr_decay)
# save checkpoint
# if self.checkpoint_path is not None:
# save_checkpoint(
# epoch = epoch,
# model = self.model,
# model_name = self.model_name,
# optimizer = self.optimizer,
# dataset_name = self.dataset_name,
# word_map = self.word_map,
# checkpoint_path = self.checkpoint_path,
# checkpoint_basename = self.checkpoint_basename
# )
# start = time.time()
exec_t = loss_end - start
grad_t = grad_end - start
loss_t = exec_t - grad_t
avg_exec_t += exec_t
avg_grad_t += grad_t
avg_loss_t += loss_t
if epoch == 0:
first_exec_t = exec_t
first_grad_t = grad_t
first_loss_t = loss_t
elif epoch == 1:
second_exec_t = exec_t
second_grad_t = grad_t
second_loss_t = loss_t
accuracy = iter_acc / self.train_tuple_num * 100
# print('[%s] [Epoch %2d] Loss = %.2f, acc = %.2f, exec_t = %.2fs, grad_t = %.2fs, loss_t = %.2fs' %
# (get_current_time(), i + 1, iter_loss, accuracy, round(exec_t, 2),
# round(grad_t, 2), round(loss_t, 2)))
writer.write('[%s] [Epoch %2d] Loss = %.2f, acc = %.2f, exec_t = %.2fs, grad_t = %.2fs, loss_t = %.2fs' %
(get_current_time(), epoch + 1, iter_loss, accuracy, round(exec_t, 2),
round(grad_t, 2), round(loss_t, 2)))
writer.write('\n')
writer.flush()
if accuracy > max_accuracy:
max_accuracy = accuracy
writer.write('[%s] [Finish] avg_exec_t = %.2fs, avg_grad_t = %.2fs, avg_loss_t = %.2fs' %
(get_current_time(), avg_exec_t / iter_num,
avg_grad_t / iter_num, avg_loss_t / iter_num))
writer.write('\n')
if iter_num > 2:
avg_exec_t -= first_exec_t
avg_grad_t -= first_grad_t
avg_loss_t -= first_loss_t
# print('[%s] [-first] avg_exec_t = %.2fs, avg_grad_t = %.2fs, avg_loss_t = %.2fs' %
# (get_current_time(), avg_exec_t / (iter_num - 1),
# avg_grad_t / (iter_num - 1), avg_loss_t / (iter_num - 1)))
writer.write('[%s] [-first] avg_exec_t = %.2fs, avg_grad_t = %.2fs, avg_loss_t = %.2fs' %
(get_current_time(), avg_exec_t / (iter_num - 1),
avg_grad_t / (iter_num - 1), avg_loss_t / (iter_num - 1)))
writer.write('\n')
avg_exec_t -= second_exec_t
avg_grad_t -= second_grad_t
avg_loss_t -= second_loss_t
# print('[%s] [-1 & 2] avg_exec_t = %.2fs, avg_grad_t = %.2fs, avg_loss_t = %.2fs' %
# (get_current_time(), avg_exec_t / (iter_num - 2),
# avg_grad_t / (iter_num - 2), avg_loss_t / (iter_num - 2)))
writer.write('[%s] [-1 & 2] avg_exec_t = %.2fs, avg_grad_t = %.2fs, avg_loss_t = %.2fs' %
(get_current_time(), avg_exec_t / (iter_num - 2),
avg_grad_t / (iter_num - 2), avg_loss_t / (iter_num - 2)))
writer.write('\n')
# print('[%s] [MaxAcc] max_accuracy = %.2f' %
# (get_current_time(), max_accuracy))
writer.write('[%s] [MaxAcc] max_accuracy = %.2f' %
(get_current_time(), max_accuracy))
writer.write('\n') | 15,444 | 35.426887 | 146 | py |
import torch
from .HAN import HAN
from .fastText import fastText
from .AttBiLSTM import AttBiLSTM
from .TextCNN import TextCNN1D, TextCNN2D
from .Transformer import Transformer
from utils.opts import Config
def make(
    config: Config,
    n_classes: int,
    vocab_size: int,
    embeddings: torch.Tensor,
    emb_size: int
) -> torch.nn.Module:
    """
    Build the model selected by ``config.model_name``.

    Parameters
    ----------
    config : Config
        Configuration settings
    n_classes : int
        Number of classes
    vocab_size : int
        Size of vocabulary
    embeddings : torch.Tensor
        Word embedding weights
    emb_size : int
        Size of word embeddings

    Returns
    -------
    torch.nn.Module
        The freshly constructed (untrained) model
    """
    name = config.model_name
    if name == 'han':
        return HAN(
            n_classes = n_classes,
            vocab_size = vocab_size,
            embeddings = embeddings,
            emb_size = emb_size,
            fine_tune = config.fine_tune_word_embeddings,
            word_rnn_size = config.word_rnn_size,
            sentence_rnn_size = config.sentence_rnn_size,
            word_rnn_layers = config.word_rnn_layers,
            sentence_rnn_layers = config.sentence_rnn_layers,
            word_att_size = config.word_att_size,
            sentence_att_size = config.sentence_att_size,
            dropout = config.dropout
        )
    if name == 'fasttext':
        return fastText(
            n_classes = n_classes,
            vocab_size = vocab_size,
            embeddings = embeddings,
            emb_size = emb_size,
            fine_tune = config.fine_tune_word_embeddings,
            hidden_size = config.hidden_size
        )
    if name == 'attbilstm':
        return AttBiLSTM(
            n_classes = n_classes,
            vocab_size = vocab_size,
            embeddings = embeddings,
            emb_size = emb_size,
            fine_tune = config.fine_tune_word_embeddings,
            rnn_size = config.rnn_size,
            rnn_layers = config.rnn_layers,
            dropout = config.dropout
        )
    if name == 'textcnn':
        # the 1D and 2D variants share the same constructor signature,
        # so only the class differs between the two branches
        if config.conv_layer == '2D':
            cnn_class = TextCNN2D
        elif config.conv_layer == '1D':
            cnn_class = TextCNN1D
        else:
            raise Exception("Convolution layer not supported: ", config.conv_layer)
        return cnn_class(
            n_classes = n_classes,
            vocab_size = vocab_size,
            embeddings = embeddings,
            emb_size = emb_size,
            fine_tune = config.fine_tune_word_embeddings,
            n_kernels = config.n_kernels,
            kernel_sizes = config.kernel_sizes,
            n_channels = config.n_channels,
            dropout = config.dropout
        )
    if name == 'transformer':
        return Transformer(
            n_classes = n_classes,
            vocab_size = vocab_size,
            embeddings = embeddings,
            d_model = emb_size,
            word_pad_len = config.word_limit,
            fine_tune = config.fine_tune_word_embeddings,
            hidden_size = config.hidden_size,
            n_heads = config.n_heads,
            n_encoders = config.n_encoders,
            dropout = config.dropout
        )
    raise Exception("Model not supported: ", config.model_name)
| 3,745 | 31.293103 | 83 | py |
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import List
class TextCNN2D(nn.Module):
    """
    Implementation of 2D version of TextCNN proposed in paper [1].

    `Here <https://github.com/yoonkim/CNN_sentence>`_ is the official
    implementation of TextCNN.

    Parameters
    ----------
    n_classes : int
        Number of classes
    vocab_size : int
        Number of words in the vocabulary
    embeddings : torch.Tensor
        Word embedding weights
    emb_size : int
        Size of word embeddings
    fine_tune : bool
        Allow fine-tuning of embedding layer? (only makes sense when using
        pre-trained embeddings)
    n_kernels : int
        Number of kernels
    kernel_sizes : List[int]
        Size of each kernel
    dropout : float
        Dropout
    n_channels : int
        Number of channels (1 / 2)

    References
    ----------
    1. "`Convolutional Neural Networks for Sentence Classification. \
    <https://www.aclweb.org/anthology/D14-1181.pdf>`_" Yoon Kim. EMNLP 2014.
    """
    def __init__(
        self,
        n_classes: int,
        vocab_size: int,
        embeddings: torch.Tensor,
        emb_size: int,
        fine_tune: bool,
        n_kernels: int,
        kernel_sizes: List[int],
        dropout: float,
        n_channels = 1
    ) -> None:
        super(TextCNN2D, self).__init__()
        # embedding layer 1: the non-static channel (optionally fine-tuned)
        self.embedding1 = nn.Embedding(vocab_size, emb_size)
        self.set_embeddings(embeddings, 1, fine_tune)
        if n_channels == 2:
            # multichannel: a static channel and a non-static channel,
            # which means embedding2 is frozen.
            # BUG FIX: initialize layer 2 here — the original passed
            # layer_id=1, which froze embedding1 instead and left
            # embedding2 randomly initialized (and trainable).
            self.embedding2 = nn.Embedding(vocab_size, emb_size)
            self.set_embeddings(embeddings, 2, False)
        else:
            self.embedding2 = None
        # one 2d conv layer per kernel size; each kernel spans the full
        # embedding dimension, so it slides over words only
        self.convs = nn.ModuleList([
            nn.Conv2d(
                in_channels = n_channels,
                out_channels = n_kernels,
                kernel_size = (size, emb_size)
            )
            for size in kernel_sizes
        ])
        self.fc = nn.Linear(len(kernel_sizes) * n_kernels, n_classes)
        self.dropout = nn.Dropout(dropout)
        self.relu = nn.ReLU()

    def set_embeddings(
        self,
        embeddings: torch.Tensor,
        layer_id: int = 1,
        fine_tune: bool = True
    ) -> None:
        """
        Set weights for embedding layer

        Parameters
        ----------
        embeddings : torch.Tensor
            Word embeddings (``None`` to keep a random uniform init)
        layer_id : int
            Embedding layer 1 or 2 (when adopting multichannel architecture)
        fine_tune : bool, optional, default=True
            Allow fine-tuning of embedding layer? (only makes sense when using
            pre-trained embeddings)
        """
        if embeddings is None:
            # initialize embedding layer with the uniform distribution
            if layer_id == 1:
                self.embedding1.weight.data.uniform_(-0.1, 0.1)
            else:
                self.embedding2.weight.data.uniform_(-0.1, 0.1)
        else:
            # initialize embedding layer with pre-trained embeddings
            if layer_id == 1:
                self.embedding1.weight = nn.Parameter(embeddings, requires_grad = fine_tune)
            else:
                self.embedding2.weight = nn.Parameter(embeddings, requires_grad = fine_tune)

    def forward(self, text: torch.Tensor, words_per_sentence: torch.Tensor) -> torch.Tensor:
        """
        Parameters
        ----------
        text : torch.Tensor (batch_size, word_pad_len)
            Input data
        words_per_sentence : torch.Tensor (batch_size)
            Sentence lengths (unused here; kept for a uniform model API)

        Returns
        -------
        scores : torch.Tensor (batch_size, n_classes)
            Class scores
        """
        # word embedding
        embeddings = self.embedding1(text).unsqueeze(1)  # (batch_size, 1, word_pad_len, emb_size)
        # multichannel: stack the static channel along dim 1
        if self.embedding2:
            embeddings2 = self.embedding2(text).unsqueeze(1)  # (batch_size, 1, word_pad_len, emb_size)
            embeddings = torch.cat((embeddings, embeddings2), dim = 1)  # (batch_size, 2, word_pad_len, emb_size)
        # conv
        conved = [self.relu(conv(embeddings)).squeeze(3) for conv in self.convs]  # [(batch size, n_kernels, word_pad_len - kernel_sizes[n] + 1)]
        # max-over-time pooling
        pooled = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in conved]  # [(batch size, n_kernels)]
        # flatten
        flattened = self.dropout(torch.cat(pooled, dim = 1))  # (batch size, n_kernels * len(kernel_sizes))
        scores = self.fc(flattened)  # (batch size, n_classes)
        return scores
| 4,764 | 29.544872 | 145 | py |
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import List
class TextCNN1D(nn.Module):
    """
    Implementation of 1D version of TextCNN proposed in paper [1].

    `Here <https://github.com/yoonkim/CNN_sentence>`_ is the official
    implementation of TextCNN.

    Parameters
    ----------
    n_classes : int
        Number of classes
    vocab_size : int
        Number of words in the vocabulary
    embeddings : torch.Tensor
        Word embedding weights
    emb_size : int
        Size of word embeddings
    fine_tune : bool
        Allow fine-tuning of embedding layer? (only makes sense when using
        pre-trained embeddings)
    n_kernels : int
        Number of kernels
    kernel_sizes : List[int]
        Size of each kernel
    dropout : float
        Dropout
    n_channels : int
        Number of channels (1 / 2)

    References
    ----------
    1. "`Convolutional Neural Networks for Sentence Classification. \
    <https://www.aclweb.org/anthology/D14-1181.pdf>`_" Yoon Kim. EMNLP 2014.
    """
    def __init__(
        self,
        n_classes: int,
        vocab_size: int,
        embeddings: torch.Tensor,
        emb_size: int,
        fine_tune: bool,
        n_kernels: int,
        kernel_sizes: List[int],
        dropout: float,
        n_channels = 1
    ) -> None:
        super(TextCNN1D, self).__init__()
        # embedding layer 1: the non-static channel (optionally fine-tuned)
        self.embedding1 = nn.Embedding(vocab_size, emb_size)
        self.set_embeddings(embeddings, 1, fine_tune)
        if n_channels == 2:
            # multichannel: a static channel and a non-static channel,
            # which means embedding2 is frozen.
            # BUG FIX: initialize layer 2 here — the original passed
            # layer_id=1, which froze embedding1 instead and left
            # embedding2 randomly initialized (and trainable).
            self.embedding2 = nn.Embedding(vocab_size, emb_size)
            self.set_embeddings(embeddings, 2, False)
        else:
            self.embedding2 = None
        # 1d conv layer: the sentence is flattened to a single row of
        # word_pad_len * emb_size values, so kernel and stride are scaled
        # by emb_size to step one word at a time
        self.convs = nn.ModuleList([
            nn.Conv1d(
                in_channels = n_channels,
                out_channels = n_kernels,
                kernel_size = size * emb_size,
                stride = emb_size
            )
            for size in kernel_sizes
        ])
        self.fc = nn.Linear(len(kernel_sizes) * n_kernels, n_classes)
        self.dropout = nn.Dropout(dropout)
        self.relu = nn.ReLU()

    def set_embeddings(
        self,
        embeddings: torch.Tensor,
        layer_id: int = 1,
        fine_tune: bool = True
    ) -> None:
        """
        Set weights for embedding layer

        Parameters
        ----------
        embeddings : torch.Tensor
            Word embeddings (``None`` to keep a random uniform init)
        layer_id : int
            Embedding layer 1 or 2 (when adopting multichannel architecture)
        fine_tune : bool, optional, default=True
            Allow fine-tuning of embedding layer? (only makes sense when using
            pre-trained embeddings)
        """
        if embeddings is None:
            # initialize embedding layer with the uniform distribution
            if layer_id == 1:
                self.embedding1.weight.data.uniform_(-0.1, 0.1)
            else:
                self.embedding2.weight.data.uniform_(-0.1, 0.1)
        else:
            # initialize embedding layer with pre-trained embeddings
            if layer_id == 1:
                self.embedding1.weight = nn.Parameter(embeddings, requires_grad=fine_tune)
            else:
                self.embedding2.weight = nn.Parameter(embeddings, requires_grad=fine_tune)

    def forward(self, text: torch.Tensor, words_per_sentence: torch.Tensor) -> torch.Tensor:
        """
        Parameters
        ----------
        text : torch.Tensor (batch_size, word_pad_len)
            Input data
        words_per_sentence : torch.Tensor (batch_size)
            Sentence lengths (unused here; kept for a uniform model API)

        Returns
        -------
        scores : torch.Tensor (batch_size, n_classes)
            Class scores
        """
        batch_size = text.size(0)
        # word embedding, flattened into a single channel row per sentence
        embeddings = self.embedding1(text).view(batch_size, 1, -1)  # (batch_size, 1, word_pad_len * emb_size)
        # multichannel: stack the static channel along dim 1
        if self.embedding2:
            embeddings2 = self.embedding2(text).view(batch_size, 1, -1)  # (batch_size, 1, word_pad_len * emb_size)
            embeddings = torch.cat((embeddings, embeddings2), dim=1)  # (batch_size, 2, word_pad_len * emb_size)
        # conv
        conved = [self.relu(conv(embeddings)) for conv in self.convs]  # [(batch size, n_kernels, word_pad_len - kernel_sizes[n] + 1)]
        # max-over-time pooling
        pooled = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in conved]  # [(batch size, n_kernels)]
        # flatten
        flattened = self.dropout(torch.cat(pooled, dim = 1))  # (batch size, n_kernels * len(kernel_sizes))
        scores = self.fc(flattened)  # (batch size, n_classes)
        return scores
| 4,842 | 29.459119 | 134 | py |
import torch
import torch.nn as nn
from typing import Optional, Tuple
from .attention import MultiHeadAttention
from .ffn import PositionWiseFeedForward
class EncoderLayer(nn.Module):
    """
    One Transformer encoder layer: multi-head self-attention followed by a
    position-wise feed-forward network (each sub-layer handles its own
    residual connection and layer normalization).

    Parameters
    ----------
    d_model : int
        Size of word embeddings
    n_heads : int
        Number of attention heads
    hidden_size : int
        Size of position-wise feed forward network
    dropout : float
        Dropout
    """
    def __init__(
        self, d_model: int, n_heads: int, hidden_size: int, dropout: float = 0.5
    ) -> None:
        super(EncoderLayer, self).__init__()
        # sub-layer 1: multi-head self-attention
        self.attention = MultiHeadAttention(d_model, n_heads, dropout)
        # sub-layer 2: position-wise fully connected feed-forward network
        self.feed_forward = PositionWiseFeedForward(d_model, hidden_size, dropout)

    def forward(
        self, x: torch.Tensor, mask: Optional[torch.Tensor] = None
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Parameters
        ----------
        x : torch.Tensor (batch_size, word_pad_len, d_model)
            Input data
        mask : torch.Tensor (batch_size, 1, word_pad_len)
            Padding mask metrix, None if it is not needed

        Returns
        -------
        out : torch.Tensor (batch_size, word_pad_len, d_model)
            Output of the current encoder layer
        att : torch.Tensor (batch_size, n_heads, word_pad_len, word_pad_len)
            Attention weights
        """
        attended, weights = self.attention(x, mask=mask)  # (batch_size, word_pad_len, d_model), (batch_size, n_heads, word_pad_len, word_pad_len)
        out = self.feed_forward(attended)  # (batch_size, word_pad_len, d_model)
        return out, weights
| 1,824 | 29.416667 | 141 | py |
import torch
import torch.nn as nn
class PositionWiseFeedForward(nn.Module):
    """
    Position-wise feed-forward network: two linear maps with a ReLU in
    between, wrapped with dropout, a residual connection and LayerNorm.

    Parameters
    ----------
    d_model : int
        Size of word embeddings
    hidden_size : int
        Size of position-wise feed forward network
    dropout : float
        Dropout
    """
    def __init__(self, d_model: int, hidden_size: int, dropout: float = 0.5) -> None:
        super(PositionWiseFeedForward, self).__init__()
        self.W_1 = nn.Linear(d_model, hidden_size)
        self.W_2 = nn.Linear(hidden_size, d_model)
        self.layer_norm = nn.LayerNorm(d_model)
        self.dropout = nn.Dropout(dropout)
        self.relu = nn.ReLU()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Parameters
        ----------
        x : torch.Tensor (batch_size, word_pad_len, d_model)
            Output of multi-head self-attention network

        Returns
        -------
        out : torch.Tensor (batch_size, word_pad_len, d_model)
            Output of position-wise feed-forward network
        """
        # eq.2: FFN = max(0, x W_1 + b_1) W_2 + b_2
        hidden = self.relu(self.W_1(x))  # (batch_size, word_pad_len, hidden_size)
        projected = self.dropout(self.W_2(hidden))  # (batch_size, word_pad_len, d_model)
        # residual connection followed by LayerNorm
        return self.layer_norm(projected + x)
| 1,361 | 26.24 | 85 | py |
import copy
import torch
from torch import nn
from .pe import PositionalEncoding
from .encoder_layer import EncoderLayer
# Run on GPU when one is available, otherwise CPU; tensors created in this
# module are explicitly moved to this device.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def get_padding_mask(seq: torch.Tensor, pad_idx: int = 0) -> torch.Tensor:
    """
    Mask tokens that are pads (not pad: 1, pad: 0)

    Parameters
    ----------
    seq : torch.Tensor (batch_size, word_pad_len)
        The sequence which needs masking
    pad_idx: index of '<pad>' (default is 0)

    Returns
    -------
    mask : torch.Tensor (batch_size, 1, word_pad_len)
        A padding mask metrix
    """
    # FIX: place the mask on the input's own device rather than the
    # module-global ``device`` — the original forced every mask onto the
    # default device, which breaks when ``seq`` lives elsewhere
    # (e.g. under DataParallel).
    mask = (seq != pad_idx).unsqueeze(-2).to(seq.device)  # (batch_size, 1, word_pad_len)
    return mask
class Transformer(nn.Module):
    """
    Implementation of Transformer proposed in paper [1]. Only the encoder part
    is used here.

    `Here <https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/models/transformer.py>`_
    is the official TensorFlow implementation of Transformer.

    Parameters
    ----------
    n_classes : int
        Number of classes
    vocab_size : int
        Number of words in the vocabulary
    embeddings : torch.Tensor
        Word embedding weights
    d_model : int
        Size of word embeddings
    word_pad_len : int
        Length of the padded sequence
    fine_tune : bool
        Allow fine-tuning of embedding layer? (only makes sense when using
        pre-trained embeddings)
    hidden_size : int
        Size of position-wise feed forward network
    n_heads : int
        Number of attention heads
    n_encoders : int
        Number of encoder layers
    dropout : float
        Dropout

    References
    ----------
    1. "`Attention Is All You Need. <https://arxiv.org/abs/1706.03762>`_" \
    Ashish Vaswani, et al. NIPS 2017.
    """
    def __init__(
        self,
        n_classes: int,
        vocab_size: int,
        embeddings: torch.Tensor,
        d_model: int,
        word_pad_len: int,
        fine_tune: bool,
        hidden_size: int,
        n_heads: int,
        n_encoders: int,
        dropout: float = 0.5
    ) -> None:
        super(Transformer, self).__init__()
        # embedding layer
        self.embeddings = nn.Embedding(vocab_size, d_model)
        self.set_embeddings(embeddings, fine_tune)
        # positional encoding layer (attribute name kept misspelled for
        # backward compatibility with existing checkpoints)
        self.postional_encoding = PositionalEncoding(d_model, word_pad_len, dropout)
        # encoder: a stack of n_encoders independently initialized layers.
        # FIX: build the layers directly instead of deepcopy-ing a template
        # stored in ``self.encoder`` — that template was registered as an
        # extra submodule whose parameters were never used in forward(),
        # inflating the parameter count and every saved checkpoint.
        self.encoders = nn.ModuleList([
            EncoderLayer(d_model, n_heads, hidden_size, dropout)
            for _ in range(n_encoders)
        ])
        # classifier over the flattened encoder output
        self.fc = nn.Linear(word_pad_len * d_model, n_classes)

    def set_embeddings(self, embeddings: torch.Tensor, fine_tune: bool = True) -> None:
        """
        Set weights for embedding layer

        Parameters
        ----------
        embeddings : torch.Tensor
            Word embeddings (``None`` to keep a random uniform init)
        fine_tune : bool, optional, default=True
            Allow fine-tuning of embedding layer? (only makes sense when using
            pre-trained embeddings)
        """
        if embeddings is None:
            # initialize embedding layer with the uniform distribution
            self.embeddings.weight.data.uniform_(-0.1, 0.1)
        else:
            # initialize embedding layer with pre-trained embeddings
            self.embeddings.weight = nn.Parameter(embeddings, requires_grad = fine_tune)

    def forward(self, text: torch.Tensor, words_per_sentence: torch.Tensor) -> torch.Tensor:
        """
        Parameters
        ----------
        text : torch.Tensor (batch_size, word_pad_len)
            Input data
        words_per_sentence : torch.Tensor (batch_size)
            Sentence lengths (unused here; padding is handled via the mask)

        Returns
        -------
        scores : torch.Tensor (batch_size, n_classes)
            Class scores
        """
        # get padding mask
        mask = get_padding_mask(text)
        # word embedding + positional encoding
        embeddings = self.embeddings(text)  # (batch_size, word_pad_len, emb_size)
        embeddings = self.postional_encoding(embeddings)
        encoder_out = embeddings
        for encoder in self.encoders:
            encoder_out, _ = encoder(encoder_out, mask = mask)  # (batch_size, word_pad_len, d_model)
        encoder_out = encoder_out.view(encoder_out.size(0), -1)  # (batch_size, word_pad_len * d_model)
        scores = self.fc(encoder_out)  # (batch_size, n_classes)
        return scores
| 4,632 | 28.509554 | 105 | py |
import torch
import torch.nn as nn
import numpy as np
# Run on GPU when one is available, otherwise CPU; tensors created in this
# module are explicitly moved to this device.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class PositionalEncoding(nn.Module):
    """
    Sinusoidal positional encoding added to word embeddings.

    Parameters
    ----------
    d_model : int
        Size of word embeddings
    word_pad_len : int
        Length of the padded sentence
    dropout : float
        Dropout
    """
    def __init__(self, d_model: int, word_pad_len: int, dropout: float) -> None:
        super(PositionalEncoding, self).__init__()
        pe = torch.tensor([
            [pos / (10000.0 ** (i // 2 * 2.0 / d_model)) for i in range(d_model)]
            for pos in range(word_pad_len)
        ])  # (word_pad_len, d_model)
        # PE(pos, 2i) = sin(pos / 10000^{2i / d_model})
        # PE(pos, 2i + 1) = cos(pos / 10000^{2i / d_model})
        # FIX: use torch.sin/torch.cos instead of np.sin/np.cos — the table
        # is a torch.Tensor, so there is no reason to round-trip via numpy.
        pe[:, 0::2] = torch.sin(pe[:, 0::2])
        pe[:, 1::2] = torch.cos(pe[:, 1::2])
        # FIX: register the table as a non-persistent buffer: it follows the
        # module on .to()/.cuda(), never receives gradients, and is excluded
        # from the state_dict so existing checkpoints keep loading.
        self.register_buffer('pe', pe, persistent=False)
        self.dropout = nn.Dropout(dropout)

    def forward(self, embeddings: torch.Tensor) -> torch.Tensor:
        """
        Parameters
        ----------
        embeddings : torch.Tensor (batch_size, word_pad_len, emb_size)
            Word embeddings

        Returns
        -------
        position encoded embeddings : torch.Tensor (batch_size, word_pad_len, emb_size)
            Word Embeddings + Positional Encoding
        """
        # FIX: the original wrapped the table in a fresh nn.Parameter on
        # every call and moved it to a module-global device; the buffer is
        # already grad-free, and we follow the input's device instead.
        embeddings = embeddings + self.pe.to(embeddings.device)
        embeddings = self.dropout(embeddings)
        return embeddings
| 1,596 | 29.132075 | 87 | py |
import torch
import torch.nn as nn
from typing import Optional, Tuple
class ScaledDotProductAttention(nn.Module):
    """
    Scaled dot-product attention: softmax(Q·K^T / sqrt(d_k))·V, with
    optional padding masking and dropout on the attention weights.

    Parameters
    ----------
    scale : float
        Scale factor (sqrt(d_k))
    dropout : float
        Dropout
    """
    def __init__(self, scale: float, dropout: float = 0.5) -> None:
        super(ScaledDotProductAttention, self).__init__()
        self.scale = scale
        self.dropout = nn.Dropout(dropout)
        self.softmax = nn.Softmax(dim=-1)

    def forward(
        self,
        Q: torch.Tensor,
        K: torch.Tensor,
        V: torch.Tensor,
        mask: Optional[torch.Tensor] = None
    ):
        """
        Parameters
        ----------
        Q : torch.Tensor (batch_size, n_heads, word_pad_len, d_k)
            Query
        K : torch.Tensor
            Key
        V : torch.Tensor
            Value
        mask : torch.Tensor (batch_size, 1, 1, word_pad_len)
            Padding mask metrix, None if it is not needed

        Returns
        -------
        context : torch.Tensor (batch_size, n_heads, word_pad_len, d_k)
            Context vector
        att : torch.Tensor (batch_size, n_heads, word_pad_len, word_pad_len)
            Attention weights
        """
        # raw scores: Q·K^T / sqrt(d_k)
        scaled_queries = Q / self.scale
        att = torch.matmul(scaled_queries, K.transpose(2, 3))  # (batch_size, n_heads, word_pad_len, word_pad_len)
        # masked positions get a large negative score so their softmax
        # weight evaluates to (effectively) zero
        if mask is not None:
            att = att.masked_fill(mask == 0, -1e9)
        # eq.1: Attention(Q, K, V) = softmax(Q·K^T / sqrt(d_k))·V
        att = self.softmax(att)
        att = self.dropout(att)  # (batch_size, n_heads, word_pad_len, word_pad_len)
        context = torch.matmul(att, V)  # (batch_size, n_heads, word_pad_len, d_k)
        return context, att
class MultiHeadAttention(nn.Module):
    """
    Multi-head self-attention: project the input into per-head query/key/
    value spaces, run scaled dot-product attention per head, merge the
    heads, and finish with dropout, a residual connection and LayerNorm.

    Parameters
    ----------
    d_model : int
        Size of word embeddings
    n_heads : int
        Number of attention heads
    dropout : float
        Dropout
    """
    def __init__(self, d_model: int, n_heads: int, dropout: float = 0.5) -> None:
        super(MultiHeadAttention, self).__init__()
        assert d_model % n_heads == 0
        # we assume d_v always equals d_k
        self.d_k = d_model // n_heads
        self.n_heads = n_heads
        # linear projections for queries, keys and values
        self.W_Q = nn.Linear(d_model, n_heads * self.d_k)
        self.W_K = nn.Linear(d_model, n_heads * self.d_k)
        self.W_V = nn.Linear(d_model, n_heads * self.d_k)
        # scaled dot-product attention with scale factor sqrt(d_k)
        scale = self.d_k ** 0.5
        self.attention = ScaledDotProductAttention(scale=scale)
        self.layer_norm = nn.LayerNorm(d_model)
        self.fc = nn.Linear(n_heads * self.d_k, d_model)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x: torch.Tensor, mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Parameters
        ----------
        x : torch.Tensor (batch_size, word_pad_len, d_model)
            Input data
        mask : torch.Tensor (batch_size, 1, word_pad_len)
            Padding mask metrix, None if it is not needed

        Returns
        -------
        out : torch.Tensor (batch_size, word_pad_len, d_model)
            Output of multi-head self-attention network
        att: torch.Tensor (batch_size, n_heads, word_pad_len, word_pad_len)
            Attention weights
        """
        batch_size = x.size(0)
        # project, then split the last dimension into (n_heads, d_k) and
        # move the head axis forward: (batch_size, n_heads, word_pad_len, d_k)
        queries = self.W_Q(x).view(batch_size, -1, self.n_heads, self.d_k).transpose(1, 2)
        keys = self.W_K(x).view(batch_size, -1, self.n_heads, self.d_k).transpose(1, 2)
        values = self.W_V(x).view(batch_size, -1, self.n_heads, self.d_k).transpose(1, 2)
        # add a head axis so the mask broadcasts over all heads
        if mask is not None:
            mask = mask.unsqueeze(1)
        context, att = self.attention(queries, keys, values, mask=mask)  # (batch_size, n_heads, word_pad_len, d_k)
        # merge heads back: (batch_size, word_pad_len, n_heads * d_k)
        merged = context.transpose(1, 2).contiguous().view(batch_size, -1, self.d_k * self.n_heads)
        # project, dropout, residual connection, LayerNorm
        out = self.layer_norm(x + self.dropout(self.fc(merged)))  # (batch_size, word_pad_len, d_model)
        return out, att
| 4,635 | 29.906667 | 145 | py |
import torch
from torch import nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence, PackedSequence
from .attention import Attention
class AttBiLSTM(nn.Module):
    """
    Implementation of Attention-based bidirectional LSTM proposed in paper [1].

    Parameters
    ----------
    n_classes : int
        Number of classes
    vocab_size : int
        Number of words in the vocabulary
    embeddings : torch.Tensor
        Word embedding weights
    emb_size : int
        Size of word embeddings
    fine_tune : bool
        Allow fine-tuning of embedding layer? (only makes sense when using
        pre-trained embeddings)
    rnn_size : int
        Size of Bi-LSTM
    rnn_layers : int
        Number of layers in Bi-LSTM
    dropout : float
        Dropout

    References
    ----------
    1. "`Attention-Based Bidirectional Long Short-Term Memory Networks for Relation Classification. \
    <https://www.aclweb.org/anthology/P16-2034.pdf>`_" Peng Zhou, et al. ACL 2016.
    """
    def __init__(
        self,
        n_classes: int,
        vocab_size: int,
        embeddings: torch.Tensor,
        emb_size: int,
        fine_tune: bool,
        rnn_size: int,
        rnn_layers: int,
        dropout: float
    ) -> None:
        super(AttBiLSTM, self).__init__()
        self.rnn_size = rnn_size
        # embedding layer
        self.embeddings = nn.Embedding(vocab_size, emb_size)
        self.set_embeddings(embeddings, fine_tune)
        # bidirectional LSTM; inter-layer dropout only applies when the
        # network is stacked
        self.BiLSTM = nn.LSTM(
            emb_size, rnn_size,
            num_layers = rnn_layers,
            bidirectional = True,
            dropout = (0 if rnn_layers == 1 else dropout),
            batch_first = True
        )
        self.attention = Attention(rnn_size)
        self.fc = nn.Linear(rnn_size, n_classes)
        self.tanh = nn.Tanh()
        self.dropout = nn.Dropout(dropout)
        self.softmax = nn.Softmax(dim=1)

    def set_embeddings(self, embeddings: torch.Tensor, fine_tune: bool = True) -> None:
        """
        Set weights for embedding layer

        Parameters
        ----------
        embeddings : torch.Tensor
            Word embeddings (``None`` to keep a random uniform init)
        fine_tune : bool, optional, default=True
            Allow fine-tuning of embedding layer? (only makes sense when using
            pre-trained embeddings)
        """
        if embeddings is None:
            # no pre-trained weights: random uniform initialization
            self.embeddings.weight.data.uniform_(-0.1, 0.1)
        else:
            # load the pre-trained embeddings, optionally trainable
            self.embeddings.weight = nn.Parameter(embeddings, requires_grad=fine_tune)

    def forward(self, text: torch.Tensor, words_per_sentence: torch.Tensor) -> torch.Tensor:
        """
        Parameters
        ----------
        text : torch.Tensor (batch_size, word_pad_len)
            Input data
        words_per_sentence : torch.Tensor (batch_size)
            Sentence lengths

        Returns
        -------
        scores : torch.Tensor (batch_size, n_classes)
            Class scores
        """
        # word embedding with dropout
        embedded = self.dropout(self.embeddings(text))  # (batch_size, word_pad_len, emb_size)
        # strip the word-pads before running the LSTM
        packed = pack_padded_sequence(
            embedded,
            lengths = words_per_sentence.tolist(),
            batch_first = True,
            enforce_sorted = False
        )  # PackedSequence whose 'data' is the flattened words (n_words, emb_size)
        # bidirectional LSTM over the packed words
        lstm_out, _ = self.BiLSTM(packed)  # PackedSequence, 'data' is (n_words, 2 * rnn_size)
        # re-pad with zeros so every sentence is word_pad_len long again
        lstm_out, _ = pad_packed_sequence(lstm_out, batch_first = True)  # (batch_size, word_pad_len, 2 * rnn_size)
        # eq.8: sum the forward and backward direction outputs,
        # H = {h_1, h_2, ..., h_T}
        H = lstm_out[:, :, :self.rnn_size] + lstm_out[:, :, self.rnn_size:]  # (batch_size, word_pad_len, rnn_size)
        # attention over the word positions
        r, alphas = self.attention(H)  # (batch_size, rnn_size), (batch_size, word_pad_len)
        # eq.12: h* = tanh(r)
        h = self.tanh(r)  # (batch_size, rnn_size)
        return self.fc(self.dropout(h))  # (batch_size, n_classes)
| 4,575 | 31 | 137 | py |
import torch
from torch import nn
from typing import Tuple
class Attention(nn.Module):
    """
    Word-position attention for the Att-BiLSTM model: scores each position
    with a learned vector and returns the attention-weighted sum.

    Parameters
    ----------
    rnn_size : int
        Size of Bi-LSTM
    """
    def __init__(self, rnn_size: int) -> None:
        super(Attention, self).__init__()
        self.w = nn.Linear(rnn_size, 1)
        self.tanh = nn.Tanh()
        self.softmax = nn.Softmax(dim=1)

    def forward(self, H: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Parameters
        ----------
        H : torch.Tensor (batch_size, word_pad_len, hidden_size)
            Output of Bi-LSTM

        Returns
        -------
        r : torch.Tensor (batch_size, rnn_size)
            Sentence representation
        alpha : torch.Tensor (batch_size, word_pad_len)
            Attention weights
        """
        # eq.9: M = tanh(H)
        activated = self.tanh(H)  # (batch_size, word_pad_len, rnn_size)
        # eq.10: α = softmax(w^T M)
        scores = self.w(activated).squeeze(2)  # (batch_size, word_pad_len)
        alpha = self.softmax(scores)  # (batch_size, word_pad_len)
        # eq.11: r = H α — attention-weighted sum over word positions
        weighted = H * alpha.unsqueeze(2)  # (batch_size, word_pad_len, rnn_size)
        r = weighted.sum(dim = 1)  # (batch_size, rnn_size)
        return r, alpha
| 1,279 | 26.234043 | 76 | py |
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence, PackedSequence
from typing import Tuple
class WordEncoder(nn.Module):
    """
    Word-level encoder of HAN: embeds words, runs a bidirectional GRU over
    each sentence, and pools the hidden states with word-level attention.

    Parameters
    ----------
    vocab_size : int
        Number of words in the vocabulary
    embeddings : torch.Tensor
        Word embedding weights (``None`` for random initialization)
    emb_size : int
        Size of word embeddings
    fine_tune : bool
        Allow fine-tuning of embedding layer? (only makes sense when using
        pre-trained embeddings)
    word_rnn_size : int
        Size of (bidirectional) word-level RNN
    word_rnn_layers : int
        Number of layers in word-level RNN
    word_att_size : int
        Size of word-level attention layer
    dropout : float
        Dropout
    """
    def __init__(
        self,
        vocab_size: int,
        embeddings: torch.Tensor,
        emb_size: int,
        fine_tune: bool,
        word_rnn_size: int,
        word_rnn_layers: int,
        word_att_size: int,
        dropout: float
    ) -> None:
        super(WordEncoder, self).__init__()

        # word embedding layer (weights filled in by set_embeddings)
        self.embeddings = nn.Embedding(vocab_size, emb_size)
        self.set_embeddings(embeddings, fine_tune)

        # word-level bidirectional GRU; inter-layer dropout only makes sense
        # with more than one layer
        self.word_rnn = nn.GRU(
            emb_size, word_rnn_size,
            num_layers = word_rnn_layers,
            bidirectional = True,
            dropout = (0 if word_rnn_layers == 1 else dropout),
            batch_first = True
        )

        # word-level attention: projection W_w followed by context vector u_w
        self.W_w = nn.Linear(2 * word_rnn_size, word_att_size)
        self.u_w = nn.Linear(word_att_size, 1, bias=False)

        self.dropout = nn.Dropout(dropout)
        self.tanh = nn.Tanh()
        self.softmax = nn.Softmax(dim=1)

    def set_embeddings(self, embeddings: torch.Tensor, fine_tune: bool = True) -> None:
        """
        Set weights for the embedding layer.

        Parameters
        ----------
        embeddings : torch.Tensor
            Word embeddings (``None`` -> uniform random initialization)
        fine_tune : bool, optional, default=True
            Allow fine-tuning of embedding layer? (only makes sense when
            using pre-trained embeddings)
        """
        if embeddings is not None:
            # use the supplied pre-trained embeddings
            self.embeddings.weight = nn.Parameter(embeddings, requires_grad = fine_tune)
        else:
            # no pre-trained weights: initialize uniformly in [-0.1, 0.1]
            self.embeddings.weight.data.uniform_(-0.1, 0.1)

    def forward(
        self, sentences: torch.Tensor, words_per_sentence: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Parameters
        ----------
        sentences : torch.Tensor (n_sentences, word_pad_len, emb_size)
            Encoded sentence-level data
        words_per_sentence : torch.Tensor (n_sentences)
            Sentence lengths

        Returns
        -------
        sentences : torch.Tensor
            Sentence embeddings
        word_alphas : torch.Tensor
            Attention weights on each word
        """
        # embed and apply dropout: (n_sentences, word_pad_len, emb_size)
        embedded = self.dropout(self.embeddings(sentences))

        # pack away the word-pads so the RNN only sees real tokens
        packed = pack_padded_sequence(
            embedded,
            lengths = words_per_sentence.tolist(),
            batch_first = True,
            enforce_sorted = False
        )

        # the GRU consumes the PackedSequence directly
        rnn_out, _ = self.word_rnn(packed)

        # re-pad with zeros; attention must be computed per-sentence, so we
        # need the padded (n_sentences, max_len, 2 * word_rnn_size) layout
        hidden, _ = pad_packed_sequence(rnn_out, batch_first=True)

        # eq.5-6: u_it = tanh(W_w h_it + b_w); alpha_it = softmax(u_w^T u_it)
        attn_scores = self.u_w(self.tanh(self.W_w(hidden))).squeeze(2)  # (n_sentences, max_len)
        word_alphas = self.softmax(attn_scores)

        # eq.7: s_i = sum_t alpha_it h_it
        sentence_vectors = (hidden * word_alphas.unsqueeze(2)).sum(dim=1)  # (n_sentences, 2 * word_rnn_size)

        return sentence_vectors, word_alphas
| 5,016 | 33.363014 | 147 | py |
CorgiPile-PyTorch | CorgiPile-PyTorch-main/nlp_dl_bench/models/HAN/han.py | import torch
import torch.nn as nn
from typing import Tuple
from .sent_encoder import *
class HAN(nn.Module):
    """
    Hierarchical Attention Network (HAN) classifier.

    Wraps a :class:`SentenceEncoder` (which itself contains the word-level
    encoder) and maps the resulting document vectors to class scores with a
    single linear layer.

    Parameters
    ----------
    n_classes : int
        Number of classes
    vocab_size : int
        Number of words in the vocabulary
    embeddings : torch.Tensor
        Word embedding weights
    emb_size : int
        Size of word embeddings
    fine_tune : bool
        Allow fine-tuning of embedding layer? (only makes sense when using
        pre-trained embeddings)
    word_rnn_size : int
        Size of (bidirectional) word-level RNN
    sentence_rnn_size : int
        Size of (bidirectional) sentence-level RNN
    word_rnn_layers : int
        Number of layers in word-level RNN
    sentence_rnn_layers : int
        Number of layers in sentence-level RNN
    word_att_size : int
        Size of word-level attention layer
    sentence_att_size : int
        Size of sentence-level attention layer
    dropout : float, optional, default=0.5
        Dropout
    """
    def __init__(
        self,
        n_classes: int,
        vocab_size: int,
        embeddings: torch.Tensor,
        emb_size: int,
        fine_tune: bool,
        word_rnn_size: int,
        sentence_rnn_size: int,
        word_rnn_layers: int,
        sentence_rnn_layers: int,
        word_att_size: int,
        sentence_att_size: int,
        dropout: float = 0.5
    ) -> None:
        super(HAN, self).__init__()

        # hierarchical encoder: documents -> sentence vectors -> doc vectors
        self.sentence_encoder = SentenceEncoder(
            vocab_size, embeddings, emb_size, fine_tune,
            word_rnn_size, sentence_rnn_size,
            word_rnn_layers, sentence_rnn_layers,
            word_att_size, sentence_att_size,
            dropout
        )

        # linear classifier over the document embedding
        self.fc = nn.Linear(2 * sentence_rnn_size, n_classes)
        self.dropout = nn.Dropout(dropout)

    def forward(
        self,
        documents: torch.Tensor,
        sentences_per_document: torch.Tensor,
        words_per_sentence: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """
        Parameters
        ----------
        documents : torch.Tensor (n_documents, sent_pad_len, word_pad_len)
            Encoded document-level data
        sentences_per_document : torch.Tensor (n_documents)
            Document lengths
        words_per_sentence : torch.Tensor (n_documents, sent_pad_len)
            Sentence lengths

        Returns
        -------
        scores : torch.Tensor (batch_size, n_classes)
            Class scores
        word_alphas : torch.Tensor
            Attention weights on each word
        sentence_alphas : torch.Tensor
            Attention weights on each sentence
        """
        # encode each document into a vector plus attention maps
        doc_vectors, word_alphas, sentence_alphas = self.sentence_encoder(
            documents,
            sentences_per_document,
            words_per_sentence
        )  # (n_documents, 2 * sentence_rnn_size), (...), (...)

        # eq.11: p = softmax(W_c v + b_c) -- softmax left to the loss
        scores = self.fc(self.dropout(doc_vectors))  # (n_documents, n_classes)

        return scores, word_alphas, sentence_alphas
| 3,401 | 26.885246 | 162 | py |
CorgiPile-PyTorch | CorgiPile-PyTorch-main/nlp_dl_bench/models/HAN/sent_encoder.py | import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence, PackedSequence
from typing import Tuple
from .word_encoder import WordEncoder
class SentenceEncoder(nn.Module):
    """
    Sentence-level attention module of HAN.

    Embeds each sentence with a word-level encoder, runs a bidirectional GRU
    over the sentence vectors of each document, and pools the hidden states
    with sentence-level attention into a single document vector.

    Parameters
    ----------
    vocab_size : int
        Number of words in the vocabulary
    embeddings : torch.Tensor
        Word embedding weights
    emb_size : int
        Size of word embeddings
    fine_tune : bool
        Allow fine-tuning of embedding layer? (only makes sense when using
        pre-trained embeddings)
    word_rnn_size : int
        Size of (bidirectional) word-level RNN
    sentence_rnn_size : int
        Size of (bidirectional) sentence-level RNN
    word_rnn_layers : int
        Number of layers in word-level RNN
    sentence_rnn_layers : int
        Number of layers in sentence-level RNN
    word_att_size : int
        Size of word-level attention layer
    sentence_att_size : int
        Size of sentence-level attention layer
    dropout : float
        Dropout
    """
    def __init__(
        self,
        vocab_size: int,
        embeddings: torch.Tensor,
        emb_size: int,
        fine_tune: bool,
        word_rnn_size: int,
        sentence_rnn_size: int,
        word_rnn_layers: int,
        sentence_rnn_layers: int,
        word_att_size: int,
        sentence_att_size: int,
        dropout: float
    ) -> None:
        super(SentenceEncoder, self).__init__()

        # word encoder: turns each (packed) sentence into a vector
        self.word_encoder = WordEncoder(
            vocab_size = vocab_size,
            embeddings = embeddings,
            emb_size = emb_size,
            fine_tune = fine_tune,
            word_rnn_size = word_rnn_size,
            word_rnn_layers = word_rnn_layers,
            word_att_size = word_att_size,
            dropout = dropout
        )
        # sentence-level RNN (bidirectional GRU); its input size is the word
        # encoder's output size (2 * word_rnn_size, because bidirectional)
        self.sentence_rnn = nn.GRU(
            2 * word_rnn_size, sentence_rnn_size,
            num_layers = sentence_rnn_layers,
            bidirectional = True,
            dropout = (0 if sentence_rnn_layers == 1 else dropout),
            batch_first = True
        )
        # sentence-level attention network
        self.W_s = nn.Linear(2 * sentence_rnn_size, sentence_att_size)
        # sentence context vector u_s to take dot-product with
        self.u_s = nn.Linear(sentence_att_size, 1, bias=False) # this performs a dot product with the linear layer's 1D parameter vector, which is the sentence context vector
        # dropout
        self.dropout = nn.Dropout(dropout)
        self.tanh = nn.Tanh()
        self.softmax = nn.Softmax(dim=1)

    def forward(
        self,
        documents: torch.Tensor,
        sentences_per_document: torch.Tensor,
        words_per_sentence: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """
        Parameters
        ----------
        documents : torch.Tensor (n_documents, sent_pad_len, word_pad_len)
            Encoded document-level data
        sentences_per_document : torch.Tensor (n_documents)
            Document lengths
        words_per_sentence : torch.Tensor (n_documents, sent_pad_len)
            Sentence lengths

        Returns
        -------
        documents : torch.Tensor
            Document embeddings
        word_alphas : torch.Tensor
            Attention weights on each word
        sentence_alphas : torch.Tensor
            Attention weights on each sentence
        """
        # pack sequences (remove sentence-pads, DOCUMENTS -> SENTENCES);
        # '.data' of the result is the flat batch of real sentences
        packed_sentences = pack_padded_sequence(
            documents,
            lengths = sentences_per_document.tolist(),
            batch_first = True,
            enforce_sorted = False
        ) # a PackedSequence object, where 'data' is the flattened sentences (n_sentences, word_pad_len)

        # re-arrange sentence lengths in the same way (DOCUMENTS -> SENTENCES)
        # so they stay aligned with the packed sentences above
        packed_words_per_sentence = pack_padded_sequence(
            words_per_sentence,
            lengths = sentences_per_document.tolist(),
            batch_first = True,
            enforce_sorted = False
        ) # a PackedSequence object, where 'data' is the flattened sentence lengths (n_sentences)

        # word encoder, get sentence vectors for the flat sentence batch
        sentences, word_alphas = self.word_encoder(
            packed_sentences.data,
            packed_words_per_sentence.data
        ) # (n_sentences, 2 * word_rnn_size), (n_sentences, max(words_per_sentence))
        sentences = self.dropout(sentences)

        # run through sentence-level RNN; a new PackedSequence is rebuilt by
        # pairing the encoded sentence vectors with the ORIGINAL packing
        # metadata (batch_sizes / sort indices), which is what keeps the
        # sentence order consistent with the documents
        packed_sentences, _ = self.sentence_rnn(PackedSequence(
            data = sentences,
            batch_sizes = packed_sentences.batch_sizes,
            sorted_indices = packed_sentences.sorted_indices,
            unsorted_indices = packed_sentences.unsorted_indices
        )) # a PackedSequence object, where 'data' is the output of the RNN (n_sentences, 2 * sentence_rnn_size)

        # unpack sequences (re-pad with 0s, SENTENCES -> DOCUMENTS)
        # we do unpacking here because attention weights have to be computed only over sentences in the same document
        documents, _ = pad_packed_sequence(packed_sentences, batch_first=True) # (n_documents, max(sentences_per_document), 2 * sentence_rnn_size)

        # sentence-level attention
        # eq.8: u_i = tanh(W_s h_i + b_s)
        u_i = self.W_s(documents) # (n_documents, max(sentences_per_document), att_size)
        u_i = self.tanh(u_i) # (n_documents, max(sentences_per_document), att_size)

        # eq.9: alpha_i = softmax(u_i u_s)
        sent_alphas = self.u_s(u_i).squeeze(2) # (n_documents, max(sentences_per_document))
        sent_alphas = self.softmax(sent_alphas) # (n_documents, max(sentences_per_document))

        # form document vectors
        # eq.10: v = \sum_i α_i h_i
        documents = documents * sent_alphas.unsqueeze(2) # (n_documents, max(sentences_per_document), 2 * sentence_rnn_size)
        documents = documents.sum(dim=1) # (n_documents, 2 * sentence_rnn_size)

        # also re-arrange word_alphas (SENTENCES -> DOCUMENTS); reusing the
        # RNN output's packing metadata is valid because batch_sizes and the
        # sort indices are unchanged by the RNN
        word_alphas, _ = pad_packed_sequence(PackedSequence(
            data = word_alphas,
            batch_sizes = packed_sentences.batch_sizes,
            sorted_indices = packed_sentences.sorted_indices,
            unsorted_indices = packed_sentences.unsorted_indices
        ), batch_first = True) # (n_documents, max(sentences_per_document), max(words_per_sentence))

        return documents, word_alphas, sent_alphas
| 6,680 | 35.508197 | 175 | py |
CorgiPile-PyTorch | CorgiPile-PyTorch-main/nlp_dl_bench/models/fastText/fasttext.py | import torch
from torch import nn
class fastText(nn.Module):
    """
    Implementation of fastText proposed in paper [1].

    `Here <https://github.com/facebookresearch/fastText>`_ is the official
    implementation of fastText.

    Averages the word embeddings of a sentence into a single vector, passes
    it through one hidden layer, and projects to class scores.

    Parameters
    ----------
    n_classes : int
        Number of classes
    vocab_size : int
        Number of words in the vocabulary
    embeddings : torch.Tensor
        Word embedding weights (``None`` for random initialization)
    emb_size : int
        Size of word embeddings
    fine_tune : bool
        Allow fine-tuning of embedding layer? (only makes sense when using
        pre-trained embeddings)
    hidden_size : int
        Size of the hidden layer

    References
    ----------
    1. "`Bag of Tricks for Efficient Text Classification. \
        <https://arxiv.org/abs/1607.01759>`_" Armand Joulin, et al. EACL 2017.
    """
    def __init__(
        self,
        n_classes: int,
        vocab_size: int,
        embeddings: torch.Tensor,
        emb_size: int,
        fine_tune: bool,
        hidden_size: int
    ) -> None:
        super(fastText, self).__init__()

        # embedding layer (weights filled in by set_embeddings)
        self.embeddings = nn.Embedding(vocab_size, emb_size)
        self.set_embeddings(embeddings, fine_tune)

        # hidden layer
        self.hidden = nn.Linear(emb_size, hidden_size)
        # output layer
        self.fc = nn.Linear(hidden_size, n_classes)

    def set_embeddings(self, embeddings: torch.Tensor, fine_tune: bool = True) -> None:
        """
        Set weights for the embedding layer.

        Parameters
        ----------
        embeddings : torch.Tensor
            Word embeddings (``None`` -> uniform random initialization)
        fine_tune : bool, optional, default=True
            Allow fine-tuning of embedding layer? (only makes sense when
            using pre-trained embeddings)
        """
        if embeddings is not None:
            # use the supplied pre-trained embeddings
            self.embeddings.weight = nn.Parameter(embeddings, requires_grad=fine_tune)
        else:
            # no pre-trained weights: initialize uniformly in [-0.1, 0.1]
            self.embeddings.weight.data.uniform_(-0.1, 0.1)

    def forward(self, text: torch.Tensor, words_per_sentence: torch.Tensor) -> torch.Tensor:
        """
        Parameters
        ----------
        text : torch.Tensor (batch_size, word_pad_len)
            Input data
        words_per_sentence : torch.Tensor (batch_size)
            Sentence lengths (unused; kept for a uniform model interface)

        Returns
        -------
        scores : torch.Tensor (batch_size, n_classes)
            Class scores
        """
        # embed and mean-pool the words of each sentence
        embedded = self.embeddings(text)  # (batch_size, word_pad_len, emb_size)
        pooled = embedded.mean(dim=1).squeeze(1)  # (batch_size, emb_size)

        # hidden layer, then class scores
        return self.fc(self.hidden(pooled))  # (batch_size, n_classes)
| 3,017 | 28.019231 | 92 | py |
CorgiPile-PyTorch | CorgiPile-PyTorch-main/nlp_dl_bench/datasets/dataloader.py | """
Load data from manually preprocessed data (see ``datasets/prepocess/``).
"""
import os
import json
from typing import Dict, Tuple, Union
import torch
from torch.utils.data import Dataset, DataLoader
from utils import load_embeddings
from utils.opts import Config
from .info import get_label_map
import sys
sys.path.append("../../nlpformat")
sys.path.append("../")
sys.path.append(".")
from nlpformat import in_mem_bismarck, in_mem_block, in_mem_block_only, in_mem_no_shuffle, in_mem_sliding_window, in_mem_once_fully_shuffle
'''
class DocDataset(Dataset):
"""
A PyTorch Dataset class to be used in a PyTorch DataLoader to create batches
(for document classification).
Parameters
----------
data_folder : str
Path to folder where data files are stored
split : str
Split, one of 'TRAIN' or 'TEST'
"""
def __init__(self, data_folder: str, split: str) -> None:
split = split.upper()
assert split in {'TRAIN', 'TEST'}
self.split = split
# load data
self.data = torch.load(os.path.join(data_folder, split + '_data.pth.tar'))
def __getitem__(self, i: int) -> Tuple[torch.LongTensor, torch.LongTensor, torch.LongTensor]:
return torch.LongTensor(self.data['docs'][i]), \
torch.LongTensor([self.data['sentences_per_document'][i]]), \
torch.LongTensor(self.data['words_per_sentence'][i]), \
torch.LongTensor([self.data['labels'][i]])
def __len__(self) -> int:
return len(self.data['labels'])
class SentDataset(Dataset):
"""
A PyTorch Dataset class to be used in a PyTorch DataLoader to create batches
(for sentence classification).
Parameters
----------
data_folder : str
Path to folder where data files are stored
split : str
Split, one of 'TRAIN' or 'TEST'
"""
def __init__(self, data_folder: str, split: str) -> None:
split = split.upper()
assert split in {'TRAIN', 'TEST'}
self.split = split
# load data
self.data = torch.load(os.path.join(data_folder, split + '_data.pth.tar'))
def __getitem__(self, i: int) -> Tuple[torch.LongTensor, torch.LongTensor, torch.LongTensor]:
return torch.LongTensor(self.data['sents'][i]), \
torch.LongTensor([self.data['words_per_sentence'][i]]), \
torch.LongTensor([self.data['labels'][i]])
def __len__(self) -> int:
return len(self.data['labels'])
'''
def _build_datasets(args, data_folder: str, split: str, use_clustered_data: bool, doc_level: bool):
    """
    Instantiate the training dataset (and the matching evaluation dataset)
    for the requested shuffle strategy.

    The in-memory dataset classes come in Doc/Sent pairs sharing the naming
    scheme ``InMem<Strategy><Doc|Sent>Dataset``, so the class is looked up
    by name instead of duplicating the whole dispatch for both levels.

    Parameters
    ----------
    args : dict
        Benchmark arguments; ``args['shuffle_mode']`` selects the strategy,
        and strategy-specific keys (e.g. ``block_num``) are read lazily.
    data_folder : str
        Folder containing the preprocessed data files
    split : str
        'train' (this helper is only used for the train split)
    use_clustered_data : bool
        Use the clustered (adversarially ordered) variant of the data?
    doc_level : bool
        True for document-level models (HAN), False for sentence-level ones

    Returns
    -------
    (trainset, testset) : tuple
        Training dataset and the dataset used for evaluation. For most
        strategies they are the same object; bismarck evaluates on a
        separate unshuffled dataset.

    Raises
    ------
    ValueError
        If ``shuffle_mode`` is unknown (the original code silently fell
        through to a NameError).
    """
    kind = 'Doc' if doc_level else 'Sent'
    shuffle_mode = args['shuffle_mode']
    common = dict(
        data_folder=data_folder,
        split=split,
        use_clustered_data=use_clustered_data
    )

    if shuffle_mode in ('once_shuffle', 'once_fully'):
        trainset = getattr(in_mem_once_fully_shuffle, 'InMemOnceFullyShuffle%sDataset' % kind)(**common)
        testset = trainset
    elif shuffle_mode == 'no_shuffle':
        trainset = getattr(in_mem_no_shuffle, 'InMemNoShuffle%sDataset' % kind)(**common)
        testset = trainset
    elif shuffle_mode == 'bismarck_mrs':
        trainset = getattr(in_mem_bismarck, 'InMemBismarck%sDataset' % kind)(
            bismarck_buffer_size_ratio=args['bismarck_buffer_size_ratio'],
            select_ratio_from_old_buffer=args['select_ratio_from_old_buffer'],
            old_buffer_checkpoint_dir=args['old_buffer_checkpoint_dir'],
            **common
        )
        # bismarck reorders its buffer during training, so evaluation runs
        # on a separate, unshuffled view of the same split
        testset = getattr(in_mem_no_shuffle, 'InMemNoShuffle%sDataset' % kind)(**common)
    elif shuffle_mode == 'block':
        trainset = getattr(in_mem_block, 'InMemBlock%sDataset' % kind)(
            block_num=args['block_num'],
            buffer_size_ratio=args['buffer_size_ratio'],
            drop_last=False,
            **common
        )
        testset = trainset
    elif shuffle_mode == 'block_only':
        trainset = getattr(in_mem_block_only, 'InMemBlockOnly%sDataset' % kind)(
            block_num=args['block_num'],
            buffer_size_ratio=args['buffer_size_ratio'],
            drop_last=False,
            **common
        )
        testset = trainset
    elif shuffle_mode == 'sliding_window':
        trainset = getattr(in_mem_sliding_window, 'InMemSlidingWindow%sDataset' % kind)(
            sliding_window_size_ratio=args['sliding_window_size_ratio'],
            **common
        )
        testset = trainset
    else:
        raise ValueError('unknown shuffle_mode: %r' % shuffle_mode)

    return trainset, testset


def load_data(args,
    config: Config, split: str, build_vocab: bool = True
) -> Union[DataLoader, Tuple[DataLoader, torch.Tensor, int, Dict[str, int], int, int]]:
    """
    Load data from files output by ``prepocess.py``.

    Parameters
    ----------
    args : dict
        Benchmark arguments (batch size, workers, shuffle strategy, ...)
    config : Config
        Configuration settings
    split : str
        'train' / 'test'
    build_vocab : bool
        Build vocabulary or not. Only makes sense when split = 'train'.

    Returns
    -------
    split = 'test':
        (test_loader, test_tuple_num) : Tuple[DataLoader, int]
            Dataloader for test data and the number of test tuples
    split = 'train':
        build_vocab = False:
            train_loader : DataLoader
                Dataloader for train data
        build_vocab = True:
            (train_loader, test_loader, embeddings, emb_size, word_map,
             n_classes, vocab_size, train_tuple_num)
                Train/eval dataloaders, pre-trained word embeddings (None if
                config.emb_pretrain is False), embedding size, word2ix map,
                number of classes, vocabulary size, number of train tuples
    """
    split = split.lower()
    assert split in {'train', 'test'}

    data_folder = config.output_path
    num_workers = args['num_workers']
    batch_size = args['batch_size']
    use_clustered_data = args['use_clustered_data']
    # HAN consumes document-level data; every other model is sentence-level
    doc_level = args['model_name'] == 'han'

    # test: a plain, unshuffled loader over the test split
    if split == 'test':
        kind = 'Doc' if doc_level else 'Sent'
        testset = getattr(in_mem_no_shuffle, 'InMemNoShuffle%sDataset' % kind)(
            data_folder=data_folder,
            split=split,
            use_clustered_data=use_clustered_data
        )
        test_loader = DataLoader(
            testset,
            batch_size = batch_size,
            shuffle = False,
            num_workers = num_workers,
            pin_memory = True
        )
        return test_loader, len(testset)

    # train: datasets implement their own shuffling strategies, so the
    # DataLoaders below must NOT shuffle again
    trainset, testset = _build_datasets(args, data_folder, split, use_clustered_data, doc_level)
    train_tuple_num = len(trainset)

    train_loader = DataLoader(
        trainset,
        batch_size = batch_size,
        shuffle = False,
        num_workers = num_workers,
        pin_memory = True
    )
    test_loader = DataLoader(
        testset,
        batch_size = batch_size,
        shuffle = False,
        num_workers = num_workers,
        pin_memory = True
    )

    if not build_vocab:
        return train_loader

    # load word2ix map produced by preprocessing
    with open(os.path.join(data_folder, 'word_map.json'), 'r') as j:
        word_map = json.load(j)
    vocab_size = len(word_map)

    # number of classes
    label_map, _ = get_label_map(args['data_name'])
    n_classes = len(label_map)

    # word embeddings: pre-trained GloVe vectors, or random initialization
    if config.emb_pretrain:
        embeddings, emb_size = load_embeddings(
            emb_file = os.path.join(config.emb_folder, config.emb_filename),
            word_map = word_map,
            output_folder = config.output_path
        )
    else:
        embeddings = None
        emb_size = config.emb_size

    return train_loader, test_loader, embeddings, emb_size, word_map, n_classes, vocab_size, train_tuple_num
| 12,615 | 33.659341 | 139 | py |
CorgiPile-PyTorch | CorgiPile-PyTorch-main/nlp_dl_bench/datasets/torchtext.py | '''
script for loading data for sentence classification using torchtext (never used)
I abandon this because torchtext loads all data in one go, which occupies
too much memory and slows down the training speed, expecially when the dataset
is big.
So I finally choose to preprocess data manually (see datasets/prepocess/) and
load data dynamically via Pytorch's Dataloader.
'''
import torch
from torchtext import data
from torchtext import datasets
from torchtext.vocab import Vectors, GloVe
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
'''
load data using torchtext (only for sentence classification)
input param:
config (Class): config settings
split: 'trian' / 'test'
build_vocab: build vocabulary?
only makes sense when split = 'train'
return:
split = 'test':
test_loader: data iterator for test data
split = 'train':
build_vocab = Flase:
train_loader: data iterator for train data
build_vocab = True:
train_loader: data iterator for train data
embeddings: pre-trained word embeddings (None if config.embed_pretrain = false)
emb_size: embedding size (config.emb_size if config.embed_pretrain = false)
word_map: word2ix map
n_classes: number of classes
vocab_size: size of vocabulary
'''
def load_data(config, split, build_vocab = True):
    """
    Load data with torchtext (legacy loader, sentence classification only).

    Parameters
    ----------
    config : Config
        Configuration settings (dataset name/path, batch size, embeddings)
    split : str
        'train' / 'test'
    build_vocab : bool
        Return vocabulary info as well? Only makes sense when split = 'train'.

    Returns
    -------
    split = 'test':
        test_loader : data iterator for test data
    split = 'train', build_vocab = False:
        train_loader : data iterator for train data
    split = 'train', build_vocab = True:
        (train_loader, embeddings, emb_size, word_map, n_classes, vocab_size)
    """
    split = split.lower()
    assert split in {'train', 'test'}

    def tokenize(text):
        return text.split()

    # Field: preprocessing recipe for a column
    text_field = data.Field(sequential = True, tokenize = tokenize, lower = True, include_lengths = True, batch_first = True, fix_length = config.word_limit)
    label_field = data.Field(sequential = False, unk_token = None) # labels need no <unk>

    # column layout differs per dataset
    if config.dataset == 'yahoo_answers':
        # | label | text1 | text2 | text3 |
        fields = [('label', label_field), ('text1', text_field), ('text2', text_field), ('text3', text_field)]
    elif config.dataset in ['yelp_full', 'yelp_polarity']:
        # | label | text |
        fields = [('label', label_field), ('text', text_field)]
    else:
        # AG News, DBpedia, Amazon Review Full/Polarity:
        # | label | (ignored column) | text |
        fields = [('label', label_field), (None, None), ('text', text_field)]

    train_data, test_data = data.TabularDataset.splits(
        path = config.dataset_path,
        train = 'train.csv',
        test = 'test.csv',
        format = 'csv',
        fields = fields,
        skip_header = False
    )

    # Yahoo Answers: merge the three text columns into a single 'text'
    if config.dataset == 'yahoo_answers':
        for example in train_data:
            example.text = example.text1 + example.text2 + example.text3
            del example.text1, example.text2, example.text3
        train_data.fields = {
            'label': label_field,
            'text': text_field
        }

    if config.embed_pretrain == True:
        # build word2ix map and load GloVe vectors for words in the map
        # NOTE(review): 'embed_foler' matches the attribute name used by
        # Config -- likely a typo upstream, kept for compatibility
        vectors = Vectors(name = config.embed_filename, cache = config.embed_foler)
        text_field.build_vocab(train_data, vectors = vectors)
        embeddings = text_field.vocab.vectors
        emb_size = text_field.vocab.vectors.size()[1]
    else:
        # build word2ix map only; embeddings will be randomly initialized
        text_field.build_vocab(train_data)
        embeddings = None
        emb_size = config.emb_size

    vocab_size = len(text_field.vocab)

    label_field.build_vocab(train_data)
    n_classes = len(label_field.vocab)

    word_map = text_field.vocab.stoi

    # BucketIterator batches examples of similar length to minimize padding
    train_loader, test_loader = data.BucketIterator.splits(
        (train_data, test_data),
        batch_size = config.batch_size,
        sort_key = lambda x: len(x.text),
        device = device,
        repeat = False,
        shuffle = True
    )

    if split == 'test':
        return test_loader
    if build_vocab == False:
        return train_loader
    return train_loader, embeddings, emb_size, word_map, n_classes, vocab_size
CorgiPile-PyTorch | CorgiPile-PyTorch-main/nlp_dl_bench/datasets/preprocess/document.py | """
Preprocess data for document classification.
"""
import torch
from typing import Tuple, Dict
from collections import Counter
from nltk.tokenize import PunktSentenceTokenizer, TreebankWordTokenizer
from tqdm import tqdm
import pandas as pd
import os
import json
from .utils import get_clean_text
# tokenizers
sent_tokenizer = PunktSentenceTokenizer()
word_tokenizer = TreebankWordTokenizer()
def read_csv(
    csv_folder: str, split, sentence_limit: int, word_limit: int
) -> Tuple[list, list, Counter]:
    """
    Read CSVs containing raw training data, clean documents and labels, and
    do a word-count.

    Parameters
    ----------
    csv_folder : str
        Folder containing the dataset in CSV format files
    split : str
        'train' or 'test' split?
    sentence_limit : int
        Truncate long documents to these many sentences
    word_limit : int
        Truncate long sentences to these many words

    Returns
    -------
    docs : list
        Documents ([ [word1a, ... ], ..., [wordna, ... ] ])
    labels : list
        List of label of each document
    word_counter : Counter
        Word frequencies over the kept documents
    """
    assert split in {'train', 'test'}

    docs = []
    labels = []
    word_counter = Counter()

    frame = pd.read_csv(os.path.join(csv_folder, split + '.csv'), header = None)
    for idx in tqdm(range(frame.shape[0])):
        row = list(frame.loc[idx, :])

        # every column after the label is text: clean it and split into sentences
        sentences = list()
        for text in row[1:]:
            for paragraph in get_clean_text(text).splitlines():
                sentences.extend(sent_tokenizer.tokenize(paragraph))

        # tokenize each sentence (up to sentence_limit), dropping empty ones
        doc_words = list()
        for sentence in sentences[:sentence_limit]:
            tokens = word_tokenizer.tokenize(sentence)[:word_limit]
            if not tokens:
                # sentence vanished after cleaning (punctuation, digits, etc.)
                continue
            doc_words.append(tokens)
            word_counter.update(tokens)

        if not doc_words:
            # the whole document vanished -> skip it entirely
            continue

        labels.append(int(row[0]) - 1)  # labels are 1-indexed in the CSV
        docs.append(doc_words)

    return docs, labels, word_counter
def encode_and_pad(
    input_docs: list, word_map: Dict[str, int], sentence_limit: int, word_limit: int
) -> Tuple[list, list, list]:
    """
    Encode documents word-by-word and zero-pad them to fixed dimensions.

    Each sentence is padded with 0 up to ``word_limit`` words, and each
    document is padded with all-zero sentences up to ``sentence_limit``.
    Out-of-vocabulary words map to ``word_map['<unk>']``.

    Parameters
    ----------
    input_docs : list
        Documents ([ [word1a, ... ], ..., [wordna, ... ] ])
    word_map : Dict[str, int]
        Word2ix map (must contain '<unk>'; index 0 is the pad index)
    sentence_limit : int
        Max number of sentences in a document
    word_limit : int
        Max number of words in a sentence

    Returns
    -------
    encoded_docs : list
        Encoded and padded documents
    sentences_per_document : list
        Number of (real) sentences per document
    words_per_sentence : list
        Number of (real) words per sentence, zero-padded per document
    """
    unk = word_map['<unk>']

    encoded_docs = [
        [
            [word_map.get(word, unk) for word in sent] + [0] * (word_limit - len(sent))
            for sent in doc
        ] + [[0] * word_limit] * (sentence_limit - len(doc))
        for doc in input_docs
    ]
    sentences_per_document = [len(doc) for doc in input_docs]
    words_per_sentence = [
        [len(sent) for sent in doc] + [0] * (sentence_limit - len(doc))
        for doc in input_docs
    ]

    return encoded_docs, sentences_per_document, words_per_sentence
def run_prepro(
    csv_folder: str,
    output_folder: str,
    sentence_limit: int,
    word_limit: int,
    min_word_count: int = 5
) -> None:
    """
    Create data files to be used for training the model.

    Reads the raw train/test CSVs, builds the vocabulary from the training
    split, encodes and pads both splits, and saves everything under
    ``output_folder`` ('word_map.json', 'TRAIN_data.pth.tar',
    'TEST_data.pth.tar').

    Parameters
    ----------
    csv_folder : str
        Folder where the CSVs with the raw data are located
    output_folder : str
        Folder where files must be created
        (fix: was mistakenly annotated as ``int``)
    sentence_limit : int
        Truncate long documents to these many sentences
    word_limit : int
        Truncate long sentences to these many words
    min_word_count : int
        Discard rare words which occur fewer times than this number
    """
    # --------------------- training data ---------------------
    print('\nTraining data: reading and preprocessing...\n')
    train_docs, train_labels, word_counter = read_csv(csv_folder, 'train', sentence_limit, word_limit)

    # create word map; index 0 is reserved for <pad>, the last index for <unk>
    word_map = dict()
    word_map['<pad>'] = 0
    for word, count in word_counter.items():
        if count >= min_word_count:
            word_map[word] = len(word_map)
    word_map['<unk>'] = len(word_map)
    print('\nTraining data: discarding words with counts less than %d, the size of the vocabulary is %d.\n' % (min_word_count, len(word_map)))

    # save word map
    with open(os.path.join(output_folder, 'word_map.json'), 'w') as j:
        json.dump(word_map, j)
    print('Training data: word map saved to %s.\n' % os.path.abspath(output_folder))

    # encode and pad
    print('Training data: encoding and padding...\n')
    encoded_train_docs, sentences_per_train_document, words_per_train_sentence = \
        encode_and_pad(train_docs, word_map, sentence_limit, word_limit)

    # save
    print('Training data: saving...\n')
    assert len(encoded_train_docs) == len(train_labels) == len(sentences_per_train_document) == len(words_per_train_sentence)
    # because of the large data, saving as a JSON can be very slow
    torch.save({
        'docs': encoded_train_docs,
        'labels': train_labels,
        'sentences_per_document': sentences_per_train_document,
        'words_per_sentence': words_per_train_sentence
    }, os.path.join(output_folder, 'TRAIN_data.pth.tar'))
    print('Training data: encoded, padded data saved to %s.\n' % os.path.abspath(output_folder))

    # free some memory before processing the test split
    del train_docs, encoded_train_docs, train_labels, sentences_per_train_document, words_per_train_sentence

    # --------------------- test data ---------------------
    print('Test data: reading and preprocessing...\n')
    test_docs, test_labels, _ = read_csv(csv_folder, 'test', sentence_limit, word_limit)

    # encode and pad with the TRAINING vocabulary (unknown words -> <unk>)
    print('\nTest data: encoding and padding...\n')
    encoded_test_docs, sentences_per_test_document, words_per_test_sentence = \
        encode_and_pad(test_docs, word_map, sentence_limit, word_limit)

    # save
    print('Test data: saving...\n')
    assert len(encoded_test_docs) == len(test_labels) == len(sentences_per_test_document) == len(words_per_test_sentence)
    torch.save({
        'docs': encoded_test_docs,
        'labels': test_labels,
        'sentences_per_document': sentences_per_test_document,
        'words_per_sentence': words_per_test_sentence
    }, os.path.join(output_folder, 'TEST_data.pth.tar'))
    print('Test data: encoded, padded data saved to %s.\n' % os.path.abspath(output_folder))

    print('All done!\n')
CorgiPile-PyTorch | CorgiPile-PyTorch-main/nlp_dl_bench/datasets/preprocess/sentence.py | """
Preprocess data for sentence classification.
"""
import torch
from typing import Tuple, Dict
from collections import Counter
from nltk.tokenize import PunktSentenceTokenizer, TreebankWordTokenizer
from tqdm import tqdm
import pandas as pd
import os
import json
from .utils import get_clean_text
# tokenizers
word_tokenizer = TreebankWordTokenizer()
def read_csv(csv_folder: str, split: str, word_limit: int) -> Tuple[list, list, Counter]:
    """
    Load the raw ``<split>.csv`` file, clean and tokenize every row, and
    count word frequencies.

    Parameters
    ----------
    csv_folder : str
        Folder containing the dataset in CSV format files
    split : str
        'train' or 'test' split?
    word_limit : int
        Truncate long sentences to these many words

    Returns
    -------
    sents : list
        Tokenized sentences ([ word1, ..., wordn ])
    labels : list
        Zero-based label of each kept sentence
    word_counter : Counter
        Frequency of every token across the kept sentences
    """
    assert split in {'train', 'test'}

    sents = []
    labels = []
    word_counter = Counter()

    frame = pd.read_csv(os.path.join(csv_folder, split + '.csv'), header = None)
    for row_idx in tqdm(range(frame.shape[0])):
        row = list(frame.loc[row_idx, :])

        # glue all text columns (everything after the label column) together
        joined = ''.join(get_clean_text(cell) for cell in row[1:])
        tokens = word_tokenizer.tokenize(joined)[:word_limit]

        # skip rows that became empty after cleaning/tokenization
        if not tokens:
            continue

        word_counter.update(tokens)
        labels.append(int(row[0]) - 1)  # since labels are 1-indexed in the CSV
        sents.append(tokens)

    return sents, labels, word_counter
def encode_and_pad(
    input_sents: list, word_map: Dict[str, int], word_limit: int
) -> Tuple[list, list]:
    """
    Map every word of every sentence to its id and right-pad each sentence
    with the ``<pad>`` id (0) up to ``word_limit`` entries.

    Parameters
    ----------
    input_sents : list
        Sentences ([ word1, ..., wordn ])
    word_map : Dict[str, int]
        Word2ix map; must contain an '<unk>' entry for out-of-vocabulary words
    word_limit : int
        Max number of words in a sentence

    Returns
    -------
    encoded_sents : list
        Encoded and padded sentences
    words_per_sentence : list
        Number of (unpadded) words per sentence
    """
    unk_id = word_map['<unk>']
    encoded_sents = []
    for sent in input_sents:
        ids = [word_map.get(word, unk_id) for word in sent]
        # pad with 0 (<pad>) so every sentence has exactly word_limit entries
        encoded_sents.append(ids + [0] * (word_limit - len(sent)))
    words_per_sentence = [len(sent) for sent in input_sents]
    return encoded_sents, words_per_sentence
def run_prepro(
    csv_folder: str, output_folder: str, word_limit: int, min_word_count: int = 5
) -> None:
    """
    Create data files to be used for training the model.

    Writes three files into ``output_folder``: ``word_map.json`` (the
    vocabulary built from the training split), ``TRAIN_data.pth.tar`` and
    ``TEST_data.pth.tar`` (encoded, padded sentences plus labels and
    sentence lengths, saved with ``torch.save``).

    Parameters
    ----------
    csv_folder : str
        Folder where the CSVs with the raw data are located
    output_folder : str
        Folder where files must be created
    word_limit : int
        Truncate long sentences to these many words
    min_word_count : int
        Discard rare words which occur fewer times than this number
    """
    # --------------------- training data ---------------------
    print('\nTraining data: reading and preprocessing...\n')
    train_sents, train_labels, word_counter = read_csv(csv_folder, 'train', word_limit)
    # create word map: id 0 is reserved for <pad>; frequent words get
    # consecutive ids; <unk> is appended last for out-of-vocabulary words
    word_map = dict()
    word_map['<pad>'] = 0
    for word, count in word_counter.items():
        if count >= min_word_count:
            word_map[word] = len(word_map)
    word_map['<unk>'] = len(word_map)
    print('\nTraining data: discarding words with counts less than %d, the size of the vocabulary is %d.\n' % (min_word_count, len(word_map)))
    # save word map
    with open(os.path.join(output_folder, 'word_map.json'), 'w') as j:
        json.dump(word_map, j)
    print('Training data: word map saved to %s.\n' % os.path.abspath(output_folder))
    # encode and pad
    print('Training data: encoding and padding...\n')
    encoded_train_sents, words_per_train_sent = encode_and_pad(train_sents, word_map, word_limit)
    # save
    print('Training data: saving...\n')
    # sanity check: one label and one length per encoded sentence
    assert len(encoded_train_sents) == len(train_labels) == len(words_per_train_sent)
    # because of the large data, saving as a JSON can be very slow
    torch.save({
        'sents': encoded_train_sents,
        'labels': train_labels,
        'words_per_sentence': words_per_train_sent
    }, os.path.join(output_folder, 'TRAIN_data.pth.tar'))
    print('Training data: encoded, padded data saved to %s.\n' % os.path.abspath(output_folder))
    # free some memory
    del train_sents, encoded_train_sents, train_labels, words_per_train_sent
    # --------------------- test data ---------------------
    # NOTE: the test split reuses the word map built from the training split
    print('Test data: reading and preprocessing...\n')
    test_sents, test_labels, _ = read_csv(csv_folder, 'test', word_limit)
    # encode and pad
    print('\nTest data: encoding and padding...\n')
    encoded_test_sents, words_per_test_sent = encode_and_pad(test_sents, word_map, word_limit)
    # save
    print('Test data: saving...\n')
    assert len(encoded_test_sents) == len(test_labels) == len(words_per_test_sent)
    torch.save({
        'sents': encoded_test_sents,
        'labels': test_labels,
        'words_per_sentence': words_per_test_sent
    }, os.path.join(output_folder, 'TEST_data.pth.tar'))
    print('Test data: encoded, padded data saved to %s.\n' % os.path.abspath(output_folder))
    print('All done!\n')
| 5,431 | 29.516854 | 142 | py |
CorgiPile-PyTorch | CorgiPile-PyTorch-main/nlp_dl_bench/utils/embedding.py | import os
from tqdm import tqdm
from typing import Dict, Tuple
import numpy as np
import torch
def init_embeddings(embeddings: torch.Tensor) -> None:
    """
    Initialize an embedding tensor in place with values drawn uniformly
    from [-b, b], where b = sqrt(3 / embed_dim).

    Parameters
    ----------
    embeddings : torch.Tensor
        Word embedding tensor of shape (vocab_size, embed_dim)
    """
    bound = np.sqrt(3.0 / embeddings.size(1))
    torch.nn.init.uniform_(embeddings, -bound, bound)
def load_embeddings(
    emb_file: str,
    word_map: Dict[str, int],
    output_folder: str
) -> Tuple[torch.Tensor, int]:
    """
    Create an embedding tensor for the specified word map, for loading into the model.

    The result is cached under ``output_folder`` as ``<emb_basename>.pth.tar``;
    subsequent calls load the cache instead of re-parsing the text file.

    Parameters
    ----------
    emb_file : str
        File containing embeddings (stored in GloVe format)
    word_map : Dict[str, int]
        Word2id map
    output_folder : str
        Path to the folder to store output files

    Returns
    -------
    embeddings : torch.Tensor
        Embeddings in the same order as the words in the word map
    embed_dim : int
        Dimension of the embeddings
    """
    emb_basename = os.path.basename(emb_file)
    cache_path = os.path.join(output_folder, emb_basename + '.pth.tar')

    # no cache, load embeddings from .txt file
    if not os.path.isfile(cache_path):
        # find embedding dimension and the total line count.
        # BUGFIX: the first line is consumed by readline(), so it has to be
        # added back; the old code under-reported tqdm's total by one.
        with open(emb_file, 'r') as f:
            embed_dim = len(f.readline().split(' ')) - 1
            num_lines = len(f.readlines()) + 1

        vocab = set(word_map.keys())

        # create tensor to hold embeddings, initialize
        embeddings = torch.FloatTensor(len(vocab), embed_dim)
        init_embeddings(embeddings)

        # read embedding file; 'with' closes the handle (the old bare
        # open() inside tqdm leaked it until garbage collection)
        with open(emb_file, 'r') as f:
            for line in tqdm(f, total = num_lines, desc = 'Loading embeddings'):
                fields = line.split(' ')
                emb_word = fields[0]
                # ignore word if not in train_vocab; checking before the
                # float parsing avoids converting vectors we will discard
                if emb_word not in vocab:
                    continue
                embedding = list(map(lambda t: float(t), filter(lambda n: n and not n.isspace(), fields[1:])))
                embeddings[word_map[emb_word]] = torch.FloatTensor(embedding)

        # create cache file so we can load it quicker the next time
        print('Saving vectors to {}'.format(cache_path))
        torch.save((embeddings, embed_dim), cache_path)
    # load embeddings from cache
    else:
        print('Loading embeddings from {}'.format(cache_path))
        embeddings, embed_dim = torch.load(cache_path)

    return embeddings, embed_dim
| 2,499 | 28.411765 | 104 | py |
CorgiPile-PyTorch | CorgiPile-PyTorch-main/nlp_dl_bench/utils/tensorboard.py | import importlib
from typing import Optional, Callable
from datetime import datetime
class TensorboardWriter:
    """
    Log metrics into a directory for visualization within the TensorBoard.

    When ``enabled`` is False (or no tensorboard backend is installed) every
    ``add_*`` call degrades to a silent no-op, so callers never need to guard.

    Parameters
    ----------
    log_dir : str, optional
        Path to the folder to save logs for TensorBoard
    enabled : bool, optional, default=False
        Enable TensorBoard or not
    """
    def __init__(
        self, log_dir: Optional[str] = None, enabled: bool = False
    ):
        self.writer = None
        self.selected_module = ""

        if enabled:
            log_dir = str(log_dir)
            # retrieve vizualization writer: prefer the native torch one,
            # fall back to tensorboardX
            succeeded = False
            for module in ["torch.utils.tensorboard", "tensorboardX"]:
                try:
                    self.writer = importlib.import_module(module).SummaryWriter(log_dir)
                    succeeded = True
                    break
                except ImportError:
                    succeeded = False
                self.selected_module = module

            if not succeeded:
                message = "Warning: visualization (Tensorboard) is configured to use, but currently not installed on " \
                          "this machine. Please install TensorboardX with 'pip install tensorboardx', upgrade PyTorch to " \
                          "version >= 1.1 to use 'torch.utils.tensorboard' or turn off the option in the config file."
                print(message)

        self.step = 0
        self.mode = ''

        # add_* names that are proxied through __getattr__ to the backend
        self.tb_writer_ftns = {
            'add_scalar', 'add_scalars', 'add_image', 'add_images', 'add_audio',
            'add_text', 'add_histogram', 'add_pr_curve', 'add_embedding'
        }
        # these take the tag verbatim instead of being suffixed with the mode
        self.tag_mode_exceptions = {'add_histogram', 'add_embedding'}

        self.timer = datetime.now()

    def set_step(self, step: int, mode: str = 'train') -> None:
        # besides tracking the step, logs steps-per-second between calls
        self.mode = mode
        self.step = step
        if step == 0:
            self.timer = datetime.now()
        else:
            duration = datetime.now() - self.timer
            self.add_scalar('steps_per_second', 1 / duration.total_seconds())
            self.timer = datetime.now()

    def __getattr__(self, name: str) -> Callable:
        if name in self.tb_writer_ftns:
            add_data = getattr(self.writer, name, None)
            def wrapper(tag, data, *args, **kwargs):
                if add_data is not None:
                    # tag the data point with the current mode (train/valid)
                    if name not in self.tag_mode_exceptions:
                        tag = '{}/{}'.format(tag, self.mode)
                    add_data(tag, data, self.step, *args, **kwargs)
            return wrapper
        # BUGFIX: the old code called object.__getattr__(name), which does
        # not exist; its own AttributeError was then caught and re-raised
        # with this message.  Raise the intended error directly instead.
        raise AttributeError("Type object '{}' has no attribute '{}'".format(self.selected_module, name))
| 2,982 | 35.82716 | 122 | py |
CorgiPile-PyTorch | CorgiPile-PyTorch-main/nlp_dl_bench/utils/common.py | import os
from typing import Tuple, Dict
import torch
from torch import nn, optim
def save_checkpoint(
    epoch: int,
    model: nn.Module,
    model_name: str,
    optimizer: optim.Optimizer,
    dataset_name: str,
    word_map: Dict[str, int],
    checkpoint_path: str,
    checkpoint_basename: str = 'checkpoint'
) -> None:
    """
    Serialize the full training state to
    ``<checkpoint_path>/<checkpoint_basename>.pth.tar``.

    Parameters
    ----------
    epoch : int
        Epoch number the current checkpoint have been trained for
    model : nn.Module
        Model
    model_name : str
        Name of the model
    optimizer : optim.Optimizer
        Optimizer to update the model's weights
    dataset_name : str
        Name of the dataset
    word_map : Dict[str, int]
        Word2ix map
    checkpoint_path : str
        Path to save the checkpoint
    checkpoint_basename : str
        Basename of the checkpoint
    """
    # bundle everything load_checkpoint() needs to resume training
    state = {
        'epoch': epoch,
        'model': model,
        'model_name': model_name,
        'optimizer': optimizer,
        'dataset_name': dataset_name,
        'word_map': word_map,
    }
    torch.save(state, os.path.join(checkpoint_path, checkpoint_basename + '.pth.tar'))
def load_checkpoint(
    checkpoint_path: str, device: torch.device
) -> Tuple[nn.Module, str, optim.Optimizer, str, Dict[str, int], int]:
    """
    Load a checkpoint, so that we can continue to train on it.

    Parameters
    ----------
    checkpoint_path : str
        Path to the checkpoint to be loaded
    device : torch.device
        Remap the model to which device

    Returns
    -------
    model : nn.Module
        Model
    model_name : str
        Name of the model
    optimizer : optim.Optimizer
        Optimizer to update the model's weights
    dataset_name : str
        Name of the dataset
    word_map : Dict[str, int]
        Word2ix map
    start_epoch : int
        We should start training the model from __th epoch (saved epoch + 1)
    """
    # Checkpoints written by save_checkpoint() pickle whole objects (model,
    # optimizer), so weights_only=False is required on PyTorch >= 2.6 where
    # the default flipped to True.  Older PyTorch versions do not accept the
    # keyword at all, hence the TypeError fallback.
    try:
        checkpoint = torch.load(checkpoint_path, map_location=str(device), weights_only=False)
    except TypeError:
        checkpoint = torch.load(checkpoint_path, map_location=str(device))

    model = checkpoint['model']
    model_name = checkpoint['model_name']
    optimizer = checkpoint['optimizer']
    dataset_name = checkpoint['dataset_name']
    word_map = checkpoint['word_map']
    # resume from the epoch after the last completed one
    start_epoch = checkpoint['epoch'] + 1

    return model, model_name, optimizer, dataset_name, word_map, start_epoch
def clip_gradient(optimizer: optim.Optimizer, grad_clip: float) -> None:
    """
    Clamp every gradient held by the optimizer into [-grad_clip, grad_clip]
    to avoid explosion of gradients during backpropagation.

    Parameters
    ----------
    optimizer : optim.Optimizer
        Optimizer with the gradients to be clipped
    grad_clip : float
        Gradient clip value
    """
    all_params = (p for group in optimizer.param_groups for p in group['params'])
    for p in all_params:
        # parameters outside the current graph may have no gradient yet
        if p.grad is not None:
            p.grad.data.clamp_(-grad_clip, grad_clip)
class AverageMeter:
    """
    Track the latest value, running sum, count and average of a metric,
    optionally mirroring every update to a TensorBoard writer.
    """
    def __init__(self, tag = None, writer = None):
        # optional tensorboard sink: each update() is forwarded as a scalar
        self.writer = writer
        self.tag = tag
        self.reset()

    def reset(self):
        """Zero out all tracked statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n = 1):
        """Record metric value ``val`` observed ``n`` times."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
        # tensorboard
        if self.writer is not None:
            self.writer.add_scalar(self.tag, val)
def adjust_learning_rate(optimizer: optim.Optimizer, scale_factor: float) -> None:
    """
    Shrink the learning rate of every parameter group by a specified factor.

    Parameters
    ----------
    optimizer : optim.Optimizer
        Optimizer whose learning rate must be shrunk
    scale_factor : float
        Factor in interval (0, 1) to multiply learning rate with
    """
    print("\nDECAYING learning rate.")
    for group in optimizer.param_groups:
        group['lr'] *= scale_factor
    print("The new learning rate is %f\n" % (optimizer.param_groups[0]['lr'],))
| 4,047 | 24.459119 | 83 | py |
CorgiPile-PyTorch | CorgiPile-PyTorch-main/imagenet_dl_bench/pre_process/images_raw_to_tfrecord.py | import matplotlib.pyplot as plt
from PIL import Image
from torchvision import transforms
import numpy as np
import torch
import sys
import os
import datetime
sys.path.append("../shuffleformat/tfrecord")
sys.path.append("../shuffleformat/corgipile")
sys.path.append(".")
import shuffleformat.tfrecord as tfrecord
import shuffleformat.corgipile as corgipile
import random
import io
def test(img_path):
    """Load an image from disk, print its pixel array, shape and dtype,
    then display it with matplotlib."""
    pixels = np.array(Image.open(img_path))
    print(pixels)
    print(pixels.shape)
    print(pixels.dtype)
    plt.imshow(pixels)
    plt.show()
def write_to_TFRecord(image_dir, image_file_list, output_record_file,
                      trans=None, shuffle=False):
    """
    Serialize image records into a TFRecord file.

    Each record stores the raw (still encoded) image bytes plus its integer
    class label and running index.

    Parameters
    ----------
    image_dir : str
        Root folder the relative image paths are resolved against
    image_file_list : list
        Tuples of (relative_path, class_label, running_index)
    output_record_file : str
        Destination TFRecord file
    trans : optional
        Unused; kept for interface compatibility
    shuffle : bool
        Shuffle ``image_file_list`` in place before writing
    """
    writer = tfrecord.writer.TFRecordWriter(output_record_file)
    if shuffle:
        random.shuffle(image_file_list)

    written = 0
    for img_path, label, index in image_file_list:
        # BUGFIX: the old bare open(...).read() leaked one file handle per
        # image until garbage collection; 'with' closes it immediately
        with open(os.path.join(image_dir, img_path), "rb") as img_file:
            image_bytes = img_file.read()
        writer.write({
            "image": (image_bytes, "byte"),
            "label": (label, "int"),
            "index": (index, "int")
        })
        written += 1
        if (written % 1000 == 0):
            print('Has written', written, 'images', flush=True)
    writer.close()
# decode image
def decode_image(features):
    """Decode the raw image bytes in ``features['image']`` into a normalized
    224x224 training tensor (random crop + horizontal flip) and unwrap the
    scalar label."""
    raw_bytes = features["image"]
    pipeline = transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])
    features["image"] = pipeline(Image.open(io.BytesIO(raw_bytes)).convert("RGB"))
    features["label"] = features["label"][0]
    return features
def read_image(tfrecord_path, index_path, mode = 'seq'):
    """Benchmark reading a TFRecord image dataset and print per-batch labels
    and read times.

    mode == 'block' uses the CorgiPile block/buffer-shuffled dataset;
    any other value reads the TFRecord sequentially.
    """
    # schema of each serialized record
    description = {"image": "byte", "label": "int", "index": "int"}
    if mode == 'block':
        dataset = corgipile.dataset.CorgiPileTFRecordDataset(
            tfrecord_path,
            index_path,
            # block_num=75700,
            # buffer_size_ratio=0.00002,
            block_num=500,
            buffer_size_ratio=0.1,
            description=description,
            transform=decode_image)
    else:
        dataset = tfrecord.torch.dataset.TFRecordDataset(
            tfrecord_path,
            index_path,
            description,
            transform=decode_image)
    loader = torch.utils.data.DataLoader(dataset, batch_size=128, num_workers=1)
    epochs = 1
    for epoch in range(0, epochs):
        if mode == 'block':
            # CorgiPile reshuffles blocks differently each epoch
            dataset.set_epoch(epoch)
        start_time = datetime.datetime.now()
        for i, images in enumerate(loader):
            for label in images['label']:
                print(label)
            # count the images in this batch (the bare `img` expression
            # is a no-op; only the count j is used below)
            j = 0
            for img in images['image']:
                img
                j = j + 1
            #print(images['label'])
            end_time = datetime.datetime.now()
            strTime = 'read time = %dms' % ((end_time - start_time).seconds * 1000 + (end_time - start_time).microseconds / 1000)
            print(strTime, "[", j, " images]")
            start_time = end_time
            # stop the benchmark after 1201 batches
            if i == 1200:
                break
def build_index(output_record_file, index_file):
    """Create the byte-offset index file for ``output_record_file`` so the
    TFRecord can be accessed randomly."""
    tfrecord.tools.tfrecord2idx.create_index(output_record_file, index_file)
def get_class_labels(image_dir):
    """Map each class subdirectory name under ``image_dir`` to an integer
    label, assigning 0, 1, ... in alphabetical order of the names."""
    class_names = sorted(entry.name for entry in os.scandir(image_dir) if entry.is_dir())
    return {name: label for label, name in enumerate(class_names)}
def fetch_all_images(image_absolute_dir, class_labels, max_num=0):
    """
    Walk the class subdirectories and collect (relative_path, label, index)
    tuples, where index is a 1-based running counter over all images.

    Parameters
    ----------
    image_absolute_dir : str
        Root directory containing one subdirectory per class
    class_labels : dict
        Class-directory name -> integer label
    max_num : int
        Stop after collecting this many images; 0 means no limit
    """
    collected = []
    running_index = 0
    for class_dir in os.listdir(image_absolute_dir):
        if class_dir.startswith('.'):  # skip hidden entries (e.g. .DS_Store)
            continue
        label = class_labels[class_dir]
        for file_name in os.listdir(os.path.join(image_absolute_dir, class_dir)):
            if file_name.startswith('.'):
                continue
            running_index += 1
            collected.append((os.path.join(class_dir, file_name), label, running_index))
            # max_num == 0 never matches here (list already has >= 1 entry),
            # which is how "no limit" is expressed
            if len(collected) == max_num:
                return collected
    return collected
def generate_tfRecords_with_index(image_dir, class_labels,
                                  output_record_file, index_file, max_num=0, shuffle=False):
    """Collect up to ``max_num`` images under ``image_dir`` (0 = all), write
    them into a TFRecord file and build the companion byte-offset index.

    ``shuffle`` randomizes the record order before writing."""
    images = fetch_all_images(image_dir, class_labels, max_num)
    write_to_TFRecord(image_dir, images, output_record_file,
                      trans=None, shuffle=shuffle)
    build_index(output_record_file, index_file)
def main():
    """Entry point: convert the raw ImageNet folder tree into TFRecord files
    (plus byte-offset indexes) for the train and val splits, and optionally
    benchmark reading them back."""
    base_dir = "/mnt/ds3lab-scratch/shaodgan/"
    raw_images_base_dir = base_dir + "ImageNet/"
    tfrecord_output_base_dir = os.path.join("/mnt/ds3lab-scratch/xuliji/corgipile_data/", "ImageNet-all-raw-tfrecords")

    shuffle = False  # True if you would like to get shuffled images in the generated TFRecords
    generate_tfRecords = True
    read_tfRecords = False
    images_num = 0  # number of images to include in the generated TFRecords, 0 for all

    def split_paths(split):
        # resolve (raw image dir, output dir, tfrecord file, index file) for a split
        image_dir = os.path.join(raw_images_base_dir, split)
        out_dir = os.path.join(tfrecord_output_base_dir, split)
        record_file = os.path.join(out_dir, split + "_clustered.tfrecord")
        index_file = os.path.join(out_dir, split + "_clustered.index")
        return image_dir, out_dir, record_file, index_file

    image_train_dir, train_output_dir, train_tfrecord_file, train_index_file = split_paths("train")
    image_val_dir, val_output_dir, val_tfrecord_file, val_index_file = split_paths("val")

    if not os.path.exists(train_output_dir):
        os.makedirs(train_output_dir)
    if not os.path.exists(val_output_dir):
        os.makedirs(val_output_dir)

    if generate_tfRecords:
        # assert the train and val images have the same class labels
        print("[Parse] Parsing class labels of training images.")
        train_class_labels = get_class_labels(image_train_dir)
        print("[Parse] Parsing class labels of validation images.")
        val_class_labels = get_class_labels(image_val_dir)
        assert(train_class_labels == val_class_labels)

        print("[TFRecords] Generating tfrecords of training images.")
        generate_tfRecords_with_index(image_train_dir, train_class_labels,
                                      train_tfrecord_file, train_index_file,
                                      max_num=images_num, shuffle=shuffle)
        print("[TFRecords] Generating tfrecords of validation images.")
        generate_tfRecords_with_index(image_val_dir, val_class_labels,
                                      val_tfrecord_file, val_index_file,
                                      max_num=images_num)

    if read_tfRecords:
        print("[Test] Reading tfrecords of training images.")
        start_time = datetime.datetime.now()
        mode = 'seq'
        read_image(train_tfrecord_file, train_index_file, mode)
        print("read val")
        read_image(val_tfrecord_file, val_index_file, mode)
        end_time = datetime.datetime.now()
        strTime = '[Test] data read time = %d ms' % ((end_time - start_time).seconds * 1000 + (end_time - start_time).microseconds / 1000)
        print(strTime)
if __name__ == '__main__':
main() | 8,062 | 35.31982 | 138 | py |
CorgiPile-PyTorch | CorgiPile-PyTorch-main/imagenet_dl_bench/normal_node/imagenet_corgipile_raw_train.py | import argparse
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0, 1, 2, 3, 4, 5, 6, 7'
import random
import shutil
import time
import warnings
from enum import Enum
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
from torch.optim.lr_scheduler import StepLR
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
import sys
import numpy as np
import io
from torch.distributed.algorithms.join import Join
from PIL import Image
# Using the following two lines if running on normal nodes
sys.path.append("../shuffleformat")
sys.path.append(".")
# for running on Euler nodes
# sys.path.append("/nfs/iiscratch-zhang.inf.ethz.ch/export/zhang/export/xuliji/code/CorgiPile-PyTorch/shuffleformat")
# sys.path.append("/nfs/iiscratch-zhang.inf.ethz.ch/export/zhang/export/xuliji/code/CorgiPile-PyTorch/")
import shuffleformat.tfrecord as tfrecord
import shuffleformat.corgipile as corgipile
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
best_acc1 = 0
'''
# Single node, multiple GPUs:
# python main.py -a resnet50 --dist-url 'tcp://127.0.0.1:FREEPORT'
# --dist-backend 'nccl' --multiprocessing-distributed
# --world-size 1 --rank 0 [imagenet-folder with train and val folders]
# Multiple nodes:
# Node 0: python main.py -a resnet50 --dist-url 'tcp://IP_OF_NODE0:FREEPORT'
# --dist-backend 'nccl' --multiprocessing-distributed
# --world-size 2 --rank 0
# [imagenet-folder with train and val folders]
# Node 1: python main.py -a resnet50 --dist-url 'tcp://IP_OF_NODE0:FREEPORT'
# --dist-backend 'nccl' --multiprocessing-distributed
# --world-size 2 --rank 1
# [imagenet-folder with train and val folders]
'''
def get_data_path(image_type, data_name, node, shuffle=False):
    """
    Resolve the TFRecord dataset directory for a given image encoding,
    dataset and cluster node.

    Parameters
    ----------
    image_type : str
        'raw' or 'RGB'
    data_name : str
        'imagenette' or 'ImageNet'
    node : str
        'euler' selects the Euler cluster prefix; anything else selects the
        ds3lab scratch prefix
    shuffle : bool
        Point at the pre-shuffled copy of the dataset

    Raises
    ------
    ValueError
        For an unsupported ``image_type`` or ``data_name``
    """
    if node == 'euler':
        basedir = '/cluster/work/zhang/xuliji/'
    else:
        basedir = '/mnt/ds3lab-scratch/xuliji/'

    if image_type not in ('raw', 'RGB'):
        raise ValueError('This data type is not supported currently!')
    if data_name not in ('imagenette', 'ImageNet'):
        raise ValueError('This dataset is not supported currently!')

    folders = {
        ('raw', 'imagenette'): "corgipile_data/imagenette2-raw-tfrecords",
        ('raw', 'ImageNet'): "corgipile_data/ImageNet-all-raw-tfrecords",
        ('RGB', 'imagenette'): "corgipile_data/imagenette2-tfrecords",
        ('RGB', 'ImageNet'): "corgipile_data/ImageNet-all-tfrecords",
    }
    folder = folders[(image_type, data_name)]
    if shuffle == True:
        folder = folder + "-shuffle"
    return basedir + folder
def main():
    """Driver for the distributed ImageNet training benchmark.

    Builds a configuration dict for every (batch_size, learning_rate,
    shuffle_mode) combination and launches one ``main_worker`` process per
    GPU via ``torch.multiprocessing.spawn`` (or calls ``main_worker``
    directly in the non-distributed case)."""
    # ---- experiment configuration ----
    image_type = 'raw' # or 'RGB'
    #data_name = 'imagenette'
    data_name = 'ImageNet'
    node = 'normal'
    data_path = get_data_path(image_type, data_name, node)
    # log_base_dir = '/mnt/ds3lab-scratch/xuliji/code/CorgiPile-PyTorch'
    log_base_dir = '/nfs/iiscratch-zhang.inf.ethz.ch/export/zhang/export/xuliji/code/CorgiPile-PyTorch'
    log_dir = 'train_log_' + data_name + '_sgd'
    model_name = "resnet50"
    data_loading_workers_num = 16
    epoch_num = 100
    start_epoch = 0
    # hyper-parameter grid: every combination below is trained once
    batch_sizes = [512]
    learning_rates = [0.1]
    momentum = 0.9
    weight_decay = 1e-4
    print_freq = 5
    world_size = 1 # the number of nodes
    rank = 0
    dist_url = 'tcp://127.0.0.1:23456'
    dist_backend = 'nccl'
    multiprocessing_distributed = True
    shuffle_modes = ['block']
    # shuffle_mode == ['no_shuffle', 'once_shuffle']
    block_num = 14000 #28000, {Total size = 140GB, 14000 blocks = 10MB per block, 28000 blocks = 5MB per block}
    buffer_size_ratio = 0.0125 # {Total buffer size = 0.0125 * 8 GPUs = 10% of the whole dataset}
    seed = None
    gpu = None
    # ---- pack everything main_worker needs into a single dict ----
    args = {}
    args['image_type'] = image_type
    args['data'] = data_path
    args['model_name'] = model_name
    args['arch'] = model_name
    args['epochs'] = epoch_num
    args['workers'] = data_loading_workers_num
    args['data_name'] = data_name
    args['momentum'] = momentum
    args['weight_decay'] = weight_decay
    args['print_freq'] = print_freq
    args['world_size'] = world_size
    args['rank'] = rank
    args['dist_backend'] = dist_backend
    args['dist_url'] = dist_url
    args['block_num'] = block_num
    args['buffer_size_ratio'] = buffer_size_ratio
    args['start_epoch'] = start_epoch
    args['seed'] = seed
    args['gpu'] = gpu
    args['multiprocessing_distributed'] = multiprocessing_distributed
    args['pretrained'] = False
    args['resume'] = False
    args['evaluate'] = False
    # args['gpu_type'] = torch.cuda.get_device_name(0)
    for batch_size in batch_sizes:
        args['batch_size'] = batch_size
        for learning_rate in learning_rates:
            args['lr'] = learning_rate
            for shuffle_mode in shuffle_modes:
                args['shuffle_mode'] = shuffle_mode
                # 'once_shuffle' reads from the pre-shuffled dataset copy
                if shuffle_mode == 'once_shuffle':
                    args['data'] = get_data_path(image_type, data_name, node, shuffle=True)
                # per-run log file names (timestamped); each GPU appends its
                # own suffix inside main_worker
                acc_log_txt = shuffle_mode + '_' + data_name + '_lr' + str(learning_rate) + '_' + get_current_time_filename() #+ '.txt'
                batch_run_log = shuffle_mode + '_' + data_name + '_lr' + str(learning_rate) + '_' + get_current_time_filename() #+ '.log'
                outdir = os.path.join(log_base_dir, log_dir, data_name, model_name, 'sgd-bs' + str(batch_size), shuffle_mode)
                acc_log_file = os.path.join(outdir, 'acc', acc_log_txt)
                batch_log_file = os.path.join(outdir, 'log', batch_run_log)
                args['acc_log_file'] = acc_log_file
                args['batch_log_file'] = batch_log_file
                if not os.path.exists(os.path.join(outdir, 'acc')):
                    os.makedirs(os.path.join(outdir, 'acc'))
                if not os.path.exists(os.path.join(outdir, 'log')):
                    os.makedirs(os.path.join(outdir, 'log'))
                if args['seed'] is not None:
                    random.seed(args['seed'])
                    torch.manual_seed(args['seed'])
                    cudnn.deterministic = True
                    warnings.warn('You have chosen to seed training. '
                                  'This will turn on the CUDNN deterministic setting, '
                                  'which can slow down your training considerably! '
                                  'You may see unexpected behavior when restarting '
                                  'from checkpoints.')
                if args['gpu'] is not None:
                    warnings.warn('You have chosen a specific GPU. This will completely '
                                  'disable data parallelism.')
                if args['dist_url'] == "env://" and args['world_size'] == -1:
                    args['world_size'] = int(os.environ["WORLD_SIZE"])
                # Single node, multiple GPUs: world_size = 1, Multiple nodes: e.g., world_size = 2
                args['distributed'] = args['world_size'] > 1 or args['multiprocessing_distributed']
                ngpus_per_node = torch.cuda.device_count()
                args['ngpus_per_node'] = ngpus_per_node
                if args['multiprocessing_distributed']:
                    # Since we have ngpus_per_node processes per node, the total world_size
                    # needs to be adjusted accordingly
                    # world_size = 8 GPUs * 2
                    args['world_size'] = ngpus_per_node * args['world_size']
                    # Use torch.multiprocessing.spawn to launch distributed processes: the
                    # main_worker process function
                    # spawn nprocs
                    mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
                else:
                    # Simply call main_worker function
                    main_worker(args['gpu'], ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args, join=True):
    """Per-process (one per GPU when distributed) training entry point.

    Opens per-GPU log files, builds the (optionally distributed) model,
    optimizer and LR scheduler, constructs the TFRecord data loaders for the
    configured shuffle mode, then runs the train/validate epoch loop with
    per-epoch checkpointing and timing statistics.

    Args:
        gpu: GPU index assigned to this process, or None.
        ngpus_per_node: number of GPUs on this node.
        args: configuration dict populated by main().
        join: unused; kept for signature compatibility with callers.
    """
    global best_acc1
    args['gpu'] = gpu
    args['ngpus_per_node'] = ngpus_per_node
    args['gpu_type'] = torch.cuda.get_device_name(0)
    # Per-GPU log files so concurrent worker processes do not interleave writes.
    acc_log_file = args['acc_log_file'] + '-gpu' + str(gpu) + '.txt'
    batch_log_file = args['batch_log_file'] + '-gpu' + str(gpu) + '.log'
    writer = open(acc_log_file, 'w')
    batch_log_writer = open(batch_log_file, 'w')
    # writer = sys.stdout
    # batch_log_writer = sys.stdout
    for k in args:
        writer.write("[params] " + str(k) + " = " + str(args[k]) + '\n')
        batch_log_writer.write("[params] " + str(k) + " = " + str(args[k]) + '\n')
    writer.flush()
    batch_log_writer.flush()
    writer.write('[%s] Start iteration\n' % get_current_time())
    batch_log_writer.write('[%s] Start iteration\n' % get_current_time())
    if gpu is not None:
        writer.write("Use GPU: {} for training\n".format(gpu))
        batch_log_writer.write("Use GPU: {} for training\n".format(gpu))
    if args['distributed']:
        # BUGFIX: args is a dict, so attribute access (args.rank) raised
        # AttributeError here; use dict indexing instead.
        if args['dist_url'] == "env://" and args['rank'] == -1:
            args['rank'] = int(os.environ["RANK"])
        if args['multiprocessing_distributed']:
            # For multiprocessing distributed training, rank needs to be the
            # global rank among all the processes: node_rank * gpus + local gpu.
            args['rank'] = args['rank'] * ngpus_per_node + gpu
        dist.init_process_group(backend=args['dist_backend'], init_method=args['dist_url'],
                                world_size=args['world_size'], rank=args['rank'])
    # create model
    if args['pretrained']:
        arch = args['arch']
        writer.write("=> using pre-trained model '{}'\n".format(arch))
        batch_log_writer.write("=> using pre-trained model '{}'".format(arch))
        model = models.__dict__[arch](pretrained=True)
    else:
        arch = args['arch']
        writer.write("=> creating model '{}'\n".format(arch))
        batch_log_writer.write("=> creating model '{}'\n".format(arch))
        model = models.__dict__[arch]()
    if not torch.cuda.is_available():
        print('using CPU, this will be slow')
    elif args['distributed']:
        # For multiprocessing distributed, DistributedDataParallel constructor
        # should always set the single device scope, otherwise,
        # DistributedDataParallel will use all available devices.
        if gpu is not None:
            torch.cuda.set_device(gpu)
            model.cuda(gpu)
            # One GPU per process: split the per-node batch size and workers
            # across the GPUs of the current node.
            ngpus_per_node = args['ngpus_per_node']
            args['batch_size'] = int(args['batch_size'] / ngpus_per_node)
            args['workers'] = int((args['workers'] + ngpus_per_node - 1) / ngpus_per_node)
            model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[gpu])
        else:
            model.cuda()
            # DistributedDataParallel will divide and allocate batch_size to all
            # available GPUs if device_ids are not set
            model = torch.nn.parallel.DistributedDataParallel(model)
    elif gpu is not None:
        torch.cuda.set_device(gpu)
        model = model.cuda(gpu)
    else:
        # DataParallel will divide and allocate batch_size to all available GPUs
        arch = args['arch']
        if arch.startswith('alexnet') or arch.startswith('vgg'):
            model.features = torch.nn.DataParallel(model.features)
            model.cuda()
        else:
            model = torch.nn.DataParallel(model).cuda()
    # define loss function (criterion), optimizer, and learning rate scheduler
    criterion = nn.CrossEntropyLoss().cuda(gpu)
    optimizer = torch.optim.SGD(model.parameters(), args['lr'],
                                momentum=args['momentum'],
                                weight_decay=args['weight_decay'])
    # Sets the learning rate to the initial LR decayed by 10 every 30 epochs.
    scheduler = StepLR(optimizer, step_size=30, gamma=0.1)
    # optionally resume from a checkpoint
    if args['resume']:
        if os.path.isfile(args['resume']):
            writer.write("=> loading checkpoint '{}'\n".format(args['resume']))
            batch_log_writer.write("=> loading checkpoint '{}'".format(args['resume']))
            if gpu is None:
                checkpoint = torch.load(args['resume'])
            else:
                # Map model to be loaded to specified single gpu.
                loc = 'cuda:{}'.format(gpu)
                checkpoint = torch.load(args['resume'], map_location=loc)
            args['start_epoch'] = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            if gpu is not None:
                # best_acc1 may be from a checkpoint from a different GPU
                best_acc1 = best_acc1.to(gpu)
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            scheduler.load_state_dict(checkpoint['scheduler'])
            # BUGFIX: args.resume -> args['resume'] (args is a dict).
            writer.write("=> loaded checkpoint '{}' (epoch {})\n"
                  .format(args['resume'], checkpoint['epoch']))
            batch_log_writer.write("=> loaded checkpoint '{}' (epoch {})"
                  .format(args['resume'], checkpoint['epoch']))
        else:
            writer.write("=> no checkpoint found at '{}'\n".format(args['resume']))
            batch_log_writer.write("=> no checkpoint found at '{}'".format(args['resume']))
    cudnn.benchmark = True
    # Data loading code
    traindir = os.path.join(args['data'], 'train')
    valdir = os.path.join(args['data'], 'val')
    train_tfrecord_file = os.path.join(traindir, "train_clustered.tfrecord")
    train_index_file = os.path.join(traindir, "train_clustered.index")
    val_tfrecord_file = os.path.join(valdir, "val_clustered.tfrecord")
    val_index_file = os.path.join(valdir, "val_clustered.index")
    if args['image_type'] == 'RGB':
        description = {"image": "byte", "label": "int", "index": "int", "width": "int", "height": "int"}
    else:
        description = {"image": "byte", "label": "int", "index": "int"}
    shuffle_mode = args['shuffle_mode']
    # once_shuffle: the image data was shuffled once when the tfrecord was built.
    # block: the image data is clustered; CorgiPile block+buffer shuffle.
    # no_shuffle: the image data is clustered and read sequentially.
    # epoch_shuffle: full random access over the tfrecord each epoch.
    if shuffle_mode == 'block':
        train_dataset = corgipile.dataset.CorgiPileTFRecordDataset(
            train_tfrecord_file,
            train_index_file,
            block_num=args['block_num'],
            buffer_size_ratio=args['buffer_size_ratio'],
            description=description,
            transform=decode_train_raw_image,
            trans_after_buffered=True,
            distributed=args['distributed'])
    elif shuffle_mode == 'no_shuffle' or shuffle_mode == 'once_shuffle':
        train_dataset = corgipile.dataset.SeqTFRecordDataset(
            train_tfrecord_file,
            train_index_file,
            num_workers = args['workers'],
            description=description,
            transform=decode_train_raw_image,
            trans_after_buffered=True,
            distributed=args['distributed'],
            data_partition=True)
    elif shuffle_mode == 'epoch_shuffle':
        train_dataset = corgipile.dataset.RandomAccessTFRecordDataset(
            train_tfrecord_file,
            train_index_file,
            description=description,
            transform=decode_train_raw_image,
            trans_after_buffered=True,
            distributed=args['distributed'])
    else:
        # ROBUSTNESS: an unknown mode previously left train_dataset unbound and
        # failed later with a confusing NameError; fail fast instead.
        raise ValueError('Unsupported shuffle_mode: %r' % shuffle_mode)
    val_dataset = corgipile.dataset.SeqTFRecordDataset(
        val_tfrecord_file,
        val_index_file,
        num_workers = args['workers'],
        description=description,
        transform=decode_val_raw_image,
        trans_after_buffered=True,
        distributed=args['distributed'],
        data_partition=True)
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args['batch_size'], shuffle=False,
        num_workers=args['workers'], pin_memory=True)
    val_loader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=args['batch_size'], shuffle=False,
        num_workers=args['workers'], pin_memory=True)
    if args['evaluate']:
        validate(batch_log_writer, val_loader, model, criterion, args)
        # BUGFIX: close the log files on this early-return path too.
        writer.close()
        batch_log_writer.close()
        return
    avg_exec_t = 0.0
    avg_grad_t = 0.0
    avg_loss_t = 0.0
    first_exec_t = 0.0
    first_grad_t = 0.0
    first_loss_t = 0.0
    second_exec_t = 0.0
    second_grad_t = 0.0
    second_loss_t = 0.0
    max_acc1 = 0.0
    max_acc5 = 0.0
    batch_log_writer.write('[%s] Start training\n' % get_current_time())
    for epoch in range(args['start_epoch'], args['epochs']):
        start = time.time()
        if shuffle_mode == 'block' or shuffle_mode == 'epoch_shuffle':
            train_dataset.set_epoch(epoch)
        with Join([model]):
            # train for one epoch
            train(batch_log_writer, train_loader, model, criterion, optimizer, epoch, args)
        grad_end = time.time()
        # evaluate on validation set
        acc1, acc5, num_val_records = validate(batch_log_writer, val_loader, model, criterion, args)
        loss_end = time.time()
        exec_t = loss_end - start
        grad_t = grad_end - start
        loss_t = exec_t - grad_t
        scheduler.step()
        i = epoch
        avg_exec_t += exec_t
        avg_grad_t += grad_t
        avg_loss_t += loss_t
        # Remember the first two epochs separately so warm-up cost (caching,
        # cudnn autotuning) can be excluded from the averages at the end.
        if i == 0:
            first_exec_t = exec_t
            first_grad_t = grad_t
            first_loss_t = loss_t
        elif i == 1:
            second_exec_t = exec_t
            second_grad_t = grad_t
            second_loss_t = loss_t
        writer.write('[%s] [Epoch %2d] acc1 = %.2f, acc5 = %.2f, exec_t = %.2fs, train_t = %.2fs, val_t = %.2fs, num_record = %d\n' %
            (get_current_time(), i + 1, acc1, acc5, round(exec_t, 2), round(grad_t, 2), round(loss_t, 2), num_val_records))
        writer.flush()
        if acc1 > max_acc1:
            max_acc1 = acc1
        if acc5 > max_acc5:
            max_acc5 = acc5
        # remember best acc@1 and save checkpoint
        is_best = acc1 > best_acc1
        best_acc1 = max(acc1, best_acc1)
        if not args['multiprocessing_distributed'] or (args['multiprocessing_distributed']
                and args['rank'] % ngpus_per_node == 0):
            save_checkpoint({
                'epoch': epoch + 1,
                'arch': args['arch'],
                'state_dict': model.state_dict(),
                'best_acc1': best_acc1,
                'optimizer' : optimizer.state_dict(),
                'scheduler' : scheduler.state_dict()
            }, is_best)
    epoch_num = args['epochs'] - args['start_epoch']
    writer.write('[%s] [Finish] avg_exec_t = %.2fs, avg_train_t = %.2fs, avg_val_t = %.2fs\n' %
        (get_current_time(), avg_exec_t / epoch_num,
         avg_grad_t / epoch_num, avg_loss_t / epoch_num))
    writer.write('\n')
    if epoch_num > 2:
        avg_exec_t -= first_exec_t
        avg_grad_t -= first_grad_t
        avg_loss_t -= first_loss_t
        writer.write('[%s] [-first] avg_exec_t = %.2fs, avg_train_t = %.2fs, avg_val_t = %.2fs\n' %
            (get_current_time(), avg_exec_t / (epoch_num - 1),
             avg_grad_t / (epoch_num - 1), avg_loss_t / (epoch_num - 1)))
        avg_exec_t -= second_exec_t
        avg_grad_t -= second_grad_t
        avg_loss_t -= second_loss_t
        writer.write('[%s] [-1 & 2] avg_exec_t = %.2fs, avg_train_t = %.2fs, avg_val_t = %.2fs\n' %
            (get_current_time(), avg_exec_t / (epoch_num - 2),
             avg_grad_t / (epoch_num - 2), avg_loss_t / (epoch_num - 2)))
    writer.write('[%s] [MaxAcc] max_acc1 = %.2f, max_acc5 = %.2f\n' %
        (get_current_time(), max_acc1, max_acc5))
    writer.close()
    batch_log_writer.close()
def train(writer, train_loader, model, criterion, optimizer, epoch, args):
    """Run one training epoch over train_loader, logging progress to writer."""
    time_meter = AverageMeter('Time', ':6.3f')
    load_meter = AverageMeter('Data', ':6.3f')
    loss_meter = AverageMeter('Loss', ':.4e')
    acc1_meter = AverageMeter('Acc@1', ':6.2f')
    acc5_meter = AverageMeter('Acc@5', ':6.2f')
    meters = [time_meter, load_meter, loss_meter, acc1_meter, acc5_meter]
    progress = ProgressMeter(writer, len(train_loader), meters,
                             prefix="Epoch: [{}]".format(epoch))
    # Enable training-mode behavior (dropout, batch-norm updates).
    model.train()
    device = args['gpu']
    tick = time.time()
    for step, (images, target) in enumerate(train_loader):
        # Time spent waiting on the data loader for this batch.
        load_meter.update(time.time() - tick)
        if device is not None:
            images = images.cuda(device, non_blocking=True)
        if torch.cuda.is_available():
            target = target.cuda(device, non_blocking=True)
        # Forward pass and loss.
        output = model(images)
        loss = criterion(output, target)
        # Record loss and top-1/top-5 accuracy for this batch.
        batch = images.size(0)
        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        loss_meter.update(loss.item(), batch)
        acc1_meter.update(acc1[0], batch)
        acc5_meter.update(acc5[0], batch)
        # Backward pass and SGD parameter update.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # Total wall time for this batch.
        time_meter.update(time.time() - tick)
        tick = time.time()
        if step % args['print_freq'] == 0:
            progress.display(step)
def validate(writer, val_loader, model, criterion, args):
    """Evaluate model on val_loader; return (top1_avg, top5_avg, num_records)."""
    time_meter = AverageMeter('Time', ':6.3f', Summary.NONE)
    loss_meter = AverageMeter('Loss', ':.4e', Summary.NONE)
    acc1_meter = AverageMeter('Acc@1', ':6.2f', Summary.AVERAGE)
    acc5_meter = AverageMeter('Acc@5', ':6.2f', Summary.AVERAGE)
    progress = ProgressMeter(writer, len(val_loader),
                             [time_meter, loss_meter, acc1_meter, acc5_meter],
                             prefix='Test: ')
    # Evaluation-mode behavior (no dropout, frozen batch-norm statistics).
    model.eval()
    device = args['gpu']
    seen = 0
    with torch.no_grad():  # no gradients needed during evaluation
        tick = time.time()
        for step, (images, target) in enumerate(val_loader):
            if device is not None:
                images = images.cuda(device, non_blocking=True)
            if torch.cuda.is_available():
                target = target.cuda(device, non_blocking=True)
            # Forward pass and loss.
            output = model(images)
            loss = criterion(output, target)
            # Record loss and top-1/top-5 accuracy for this batch.
            batch = images.size(0)
            acc1, acc5 = accuracy(output, target, topk=(1, 5))
            loss_meter.update(loss.item(), batch)
            acc1_meter.update(acc1[0], batch)
            acc5_meter.update(acc5[0], batch)
            # Wall time for this batch.
            time_meter.update(time.time() - tick)
            tick = time.time()
            if step % args['print_freq'] == 0:
                progress.display(step)
            seen += batch
    progress.display_summary()
    return (acc1_meter.avg, acc5_meter.avg, seen)
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    """Serialize *state* to *filename*; mirror it to 'model_best.pth.tar' when best."""
    torch.save(state, filename)
    if not is_best:
        return
    shutil.copyfile(filename, 'model_best.pth.tar')
class Summary(Enum):
    """How an AverageMeter renders itself in a summary line (see AverageMeter.summary)."""
    NONE = 0
    AVERAGE = 1
    SUM = 2
    COUNT = 3
class AverageMeter(object):
    """Tracks the most recent value plus a running sum, count, and average."""

    def __init__(self, name, fmt=':f', summary_type=Summary.AVERAGE):
        self.name = name
        self.fmt = fmt
        self.summary_type = summary_type
        self.reset()

    def reset(self):
        """Zero out all tracked statistics."""
        self.val = self.avg = self.sum = self.count = 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def __str__(self):
        template = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
        return template.format(**self.__dict__)

    def summary(self):
        """Render this meter according to its summary_type."""
        kind = self.summary_type
        if kind is Summary.NONE:
            fmtstr = ''
        elif kind is Summary.AVERAGE:
            fmtstr = '{name} {avg:.3f}'
        elif kind is Summary.SUM:
            fmtstr = '{name} {sum:.3f}'
        elif kind is Summary.COUNT:
            fmtstr = '{name} {count:.3f}'
        else:
            raise ValueError('invalid summary type %r' % kind)
        return fmtstr.format(**self.__dict__)
class ProgressMeter(object):
    """Formats and writes periodic progress lines for a set of AverageMeters.

    Args:
        writer: file-like object the progress lines are written to.
        num_batches: total number of batches (sizes the batch counter field).
        meters: list of AverageMeter instances to render on each line.
        prefix: string prepended to every progress line.
    """
    def __init__(self, writer, num_batches, meters, prefix=""):
        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
        self.meters = meters
        self.prefix = prefix
        self.writer = writer

    def display(self, batch):
        """Write one timestamped line: prefix, batch counter, then all meters."""
        entries = [self.prefix + self.batch_fmtstr.format(batch)]
        entries += [str(meter) for meter in self.meters]
        self.writer.write('[' + get_current_time() + '] ' + '\t'.join(entries) + '\n')
        self.writer.flush()

    def display_summary(self):
        """Write one timestamped line with each meter's summary rendering."""
        entries = [" *"]
        entries += [meter.summary() for meter in self.meters]
        self.writer.write('[' + get_current_time() + '] ' + ' '.join(entries) + '\n')
        self.writer.flush()

    def _get_batch_fmtstr(self, num_batches):
        """Return e.g. '[{:3d}/100]' so batch counters line up in the log."""
        # FIX: 'num_batches // 1' was a no-op carried over from the upstream
        # example; use num_batches directly.
        num_digits = len(str(num_batches))
        fmt = '{:' + str(num_digits) + 'd}'
        return '[' + fmt + '/' + fmt.format(num_batches) + ']'
def accuracy(output, target, topk=(1,)):
    """Return top-k accuracies (percentages, one tensor per k in *topk*)."""
    with torch.no_grad():
        largest_k = max(topk)
        num_samples = target.size(0)
        # Indices of the k highest-scoring classes per sample, as (k, batch).
        _, top_pred = output.topk(largest_k, 1, True, True)
        top_pred = top_pred.t()
        # hits[j, i] is True when the (j+1)-th guess for sample i is correct.
        hits = top_pred.eq(target.view(1, -1).expand_as(top_pred))
        return [
            hits[:k].reshape(-1).float().sum(0, keepdim=True).mul_(100.0 / num_samples)
            for k in topk
        ]
# decode image
def decode_train_RGB_image(features):
    """Rebuild a PIL image from raw RGB pixels and apply training augmentation."""
    w = features["width"][0]
    h = features["height"][0]
    pixels = features["image"].reshape((h, w, 3))
    image = Image.fromarray(np.uint8(pixels))
    # Standard ImageNet training augmentation + channel normalization.
    augment = transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])
    return (augment(image), features["label"][0])
def decode_train_raw_image(features):
    """Decode a JPEG byte payload and apply training augmentation."""
    image = Image.open(io.BytesIO(features["image"])).convert("RGB")
    # Standard ImageNet training augmentation + channel normalization.
    augment = transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])
    tensor = augment(image)
    label = features["label"][0]
    # Drop the raw payloads so buffered records do not keep the bytes alive.
    features['image'] = None
    features["label"] = None
    return (tensor, label)
def decode_val_raw_image(features):
    """Decode a JPEG byte payload and apply deterministic validation transforms."""
    image = Image.open(io.BytesIO(features["image"])).convert("RGB")
    # Deterministic resize + center crop + channel normalization.
    preprocess = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])
    tensor = preprocess(image)
    label = features["label"][0]
    # Drop the raw payloads so buffered records do not keep the bytes alive.
    features['image'] = None
    features["label"] = None
    return (tensor, label)
def decode_val_RGB_image(features):
    """Rebuild a PIL image from raw RGB pixels and apply validation transforms."""
    w = features["width"][0]
    h = features["height"][0]
    pixels = features["image"].reshape((h, w, 3))
    image = Image.fromarray(np.uint8(pixels))
    # Deterministic resize + center crop + channel normalization.
    preprocess = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])
    return (preprocess(image), features["label"][0])
def get_current_time():
    """Current local time formatted as 'YYYY-MM-DD HH:MM:SS' for log lines."""
    now = time.localtime()
    return time.strftime("%Y-%m-%d %H:%M:%S", now)
def get_current_time_filename():
    """Current local time as 'YYYY-MM-DD-HH-MM-SS' (filesystem-safe, no colons)."""
    now = time.localtime()
    return time.strftime("%Y-%m-%d-%H-%M-%S", now)
if __name__ == '__main__':
main() | 30,945 | 35.025611 | 137 | py |
CorgiPile-PyTorch | CorgiPile-PyTorch-main/imagenet_dl_bench/euler/imagenet_corgipile_raw_train_on_euler.py | import argparse
import os
import random
import shutil
import time
import warnings
from enum import Enum
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
from torch.optim.lr_scheduler import StepLR
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
import sys
import numpy as np
import io
from torch.distributed.algorithms.join import Join
from PIL import Image
# Using the following two lines if running on normal nodes
# sys.path.append("../shuffleformat")
# sys.path.append(".")
# for running on Euler nodes
sys.path.append("/nfs/iiscratch-zhang.inf.ethz.ch/export/zhang/export/xuliji/code/CorgiPile-PyTorch/shuffleformat")
sys.path.append("/nfs/iiscratch-zhang.inf.ethz.ch/export/zhang/export/xuliji/code/CorgiPile-PyTorch/")
import shuffleformat.tfrecord as tfrecord
import shuffleformat.corgipile as corgipile
# All public torchvision model constructors (lowercase, callable), e.g. 'resnet50'.
model_names = sorted(name for name in models.__dict__
    if name.islower() and not name.startswith("__")
    and callable(models.__dict__[name]))
# Best top-1 validation accuracy seen so far; updated globally in main_worker.
best_acc1 = 0
'''
# Single node, multiple GPUs:
# python main.py -a resnet50 --dist-url 'tcp://127.0.0.1:FREEPORT'
# --dist-backend 'nccl' --multiprocessing-distributed
# --world-size 1 --rank 0 [imagenet-folder with train and val folders]
# Multiple nodes:
# Node 0: python main.py -a resnet50 --dist-url 'tcp://IP_OF_NODE0:FREEPORT'
# --dist-backend 'nccl' --multiprocessing-distributed
# --world-size 2 --rank 0
# [imagenet-folder with train and val folders]
# Node 1: python main.py -a resnet50 --dist-url 'tcp://IP_OF_NODE0:FREEPORT'
# --dist-backend 'nccl' --multiprocessing-distributed
# --world-size 2 --rank 1
# [imagenet-folder with train and val folders]
'''
def get_data_path(image_type, data_name, node, shuffle=False):
    """Resolve the TFRecord dataset directory for an (image type, dataset, node) combo.

    Args:
        image_type: 'raw' (JPEG bytes) or 'RGB' (decoded pixel arrays).
        data_name: 'imagenette' or 'ImageNet'.
        node: 'euler' selects the cluster scratch path; anything else the lab path.
        shuffle: when True, point at the pre-shuffled variant of the dataset.

    Returns:
        Absolute path of the dataset directory.

    Raises:
        ValueError: for an unsupported image_type or data_name.
    """
    if node == 'euler':
        basedir = '/cluster/work/zhang/xuliji/'
    else:
        basedir = '/mnt/ds3lab-scratch/xuliji/'
    # Relative dataset directories, keyed by (image_type, data_name).
    rel_dirs = {
        ('raw', 'imagenette'): "corgipile_data/imagenette2-raw-tfrecords",
        ('raw', 'ImageNet'): "corgipile_data/ImageNet-all-raw-tfrecords",
        ('RGB', 'imagenette'): "corgipile_data/imagenette2-tfrecords",
        ('RGB', 'ImageNet'): "corgipile_data/ImageNet-all-tfrecords",
    }
    # Check image_type first so each bad input keeps its original error message.
    if image_type not in ('raw', 'RGB'):
        raise ValueError('This data type is not supported currently!')
    if (image_type, data_name) not in rel_dirs:
        raise ValueError('This dataset is not supported currently!')
    data_path = basedir + rel_dirs[(image_type, data_name)]
    if shuffle:
        # The shuffled copy of every dataset lives beside it with this suffix.
        data_path += "-shuffle"
    return data_path
def main():
    """Build the experiment configuration and launch training.

    Chooses the dataset/logging paths, fills the ``args`` dict that every
    worker reads, then — for each (batch size, learning rate, shuffle mode)
    combination — either spawns one process per GPU (distributed) or calls
    main_worker directly.
    """
    image_type = 'raw' # or 'RGB'
    #data_name = 'imagenette'
    data_name = 'ImageNet'
    node = 'euler'
    data_path = get_data_path(image_type, data_name, node)
    # log_base_dir = '/mnt/ds3lab-scratch/xuliji/code/CorgiPile-PyTorch'
    log_base_dir = '/nfs/iiscratch-zhang.inf.ethz.ch/export/zhang/export/xuliji/code/CorgiPile-PyTorch'
    log_dir = 'train_log_' + data_name + '_sgd'
    model_name = "resnet50"
    data_loading_workers_num = 16
    epoch_num = 100
    start_epoch = 0
    batch_sizes = [512]
    learning_rates = [0.1]
    momentum = 0.9
    weight_decay = 1e-4
    print_freq = 5
    world_size = 1 # the number of nodes
    rank = 0
    dist_url = 'tcp://127.0.0.1:23456'
    dist_backend = 'nccl'
    multiprocessing_distributed = True
    shuffle_modes = ['block']
    # shuffle_mode == ['no_shuffle', 'once_shuffle']
    block_num = 14000 #28000, {Total size = 140GB, 14000 blocks = 10MB per block, 28000 blocks = 5MB per block}
    buffer_size_ratio = 0.0125 # {Total buffer size = 0.0125 * 8 GPUs = 10% of the whole dataset}
    seed = None
    gpu = None
    # Single shared configuration dict; main_worker reads everything from here.
    args = {}
    args['image_type'] = image_type
    args['data'] = data_path
    args['model_name'] = model_name
    args['arch'] = model_name
    args['epochs'] = epoch_num
    args['workers'] = data_loading_workers_num
    args['data_name'] = data_name
    args['momentum'] = momentum
    args['weight_decay'] = weight_decay
    args['print_freq'] = print_freq
    args['world_size'] = world_size
    args['rank'] = rank
    args['dist_backend'] = dist_backend
    args['dist_url'] = dist_url
    args['block_num'] = block_num
    args['buffer_size_ratio'] = buffer_size_ratio
    args['start_epoch'] = start_epoch
    args['seed'] = seed
    args['gpu'] = gpu
    args['multiprocessing_distributed'] = multiprocessing_distributed
    args['pretrained'] = False
    args['resume'] = False
    args['evaluate'] = False
    # args['gpu_type'] = torch.cuda.get_device_name(0)
    # Grid over batch size x learning rate x shuffle mode; one run per combo.
    for batch_size in batch_sizes:
        args['batch_size'] = batch_size
        for learning_rate in learning_rates:
            args['lr'] = learning_rate
            for shuffle_mode in shuffle_modes:
                args['shuffle_mode'] = shuffle_mode
                if shuffle_mode == 'once_shuffle':
                    # once_shuffle uses the pre-shuffled copy of the dataset.
                    args['data'] = get_data_path(image_type, data_name, node, shuffle=True)
                acc_log_txt = shuffle_mode + '_' + data_name + '_lr' + str(learning_rate) + '_' + get_current_time_filename() #+ '.txt'
                batch_run_log = shuffle_mode + '_' + data_name + '_lr' + str(learning_rate) + '_' + get_current_time_filename() #+ '.log'
                outdir = os.path.join(log_base_dir, log_dir, data_name, model_name, 'sgd-bs' + str(batch_size), shuffle_mode)
                acc_log_file = os.path.join(outdir, 'acc', acc_log_txt)
                batch_log_file = os.path.join(outdir, 'log', batch_run_log)
                args['acc_log_file'] = acc_log_file
                args['batch_log_file'] = batch_log_file
                if not os.path.exists(os.path.join(outdir, 'acc')):
                    os.makedirs(os.path.join(outdir, 'acc'))
                if not os.path.exists(os.path.join(outdir, 'log')):
                    os.makedirs(os.path.join(outdir, 'log'))
                if args['seed'] is not None:
                    random.seed(args['seed'])
                    torch.manual_seed(args['seed'])
                    cudnn.deterministic = True
                    warnings.warn('You have chosen to seed training. '
                                'This will turn on the CUDNN deterministic setting, '
                                'which can slow down your training considerably! '
                                'You may see unexpected behavior when restarting '
                                'from checkpoints.')
                if args['gpu'] is not None:
                    warnings.warn('You have chosen a specific GPU. This will completely '
                                'disable data parallelism.')
                if args['dist_url'] == "env://" and args['world_size'] == -1:
                    args['world_size'] = int(os.environ["WORLD_SIZE"])
                # Single node, multiple GPUs: world_size = 1, Multiple nodes: e.g., world_size = 2
                args['distributed'] = args['world_size'] > 1 or args['multiprocessing_distributed']
                ngpus_per_node = torch.cuda.device_count()
                args['ngpus_per_node'] = ngpus_per_node
                if args['multiprocessing_distributed']:
                    # Since we have ngpus_per_node processes per node, the total world_size
                    # needs to be adjusted accordingly
                    args['world_size'] = ngpus_per_node * args['world_size']
                    # Use torch.multiprocessing.spawn to launch distributed processes: the
                    # main_worker process function
                    mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
                else:
                    # Simply call main_worker function
                    main_worker(args['gpu'], ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args, join=True):
    """Per-process (one per GPU when distributed) training entry point.

    Opens per-GPU log files, builds the (optionally distributed) model,
    optimizer and LR scheduler, constructs the TFRecord data loaders for the
    configured shuffle mode, then runs the train/validate epoch loop with
    per-epoch checkpointing and timing statistics.

    Args:
        gpu: GPU index assigned to this process, or None.
        ngpus_per_node: number of GPUs on this node.
        args: configuration dict populated by main().
        join: unused; kept for signature compatibility with callers.
    """
    global best_acc1
    args['gpu'] = gpu
    args['ngpus_per_node'] = ngpus_per_node
    args['gpu_type'] = torch.cuda.get_device_name(0)
    # Per-GPU log files so concurrent worker processes do not interleave writes.
    acc_log_file = args['acc_log_file'] + '-gpu' + str(gpu) + '.txt'
    batch_log_file = args['batch_log_file'] + '-gpu' + str(gpu) + '.log'
    writer = open(acc_log_file, 'w')
    batch_log_writer = open(batch_log_file, 'w')
    # writer = sys.stdout
    # batch_log_writer = sys.stdout
    for k in args:
        writer.write("[params] " + str(k) + " = " + str(args[k]) + '\n')
        batch_log_writer.write("[params] " + str(k) + " = " + str(args[k]) + '\n')
    writer.flush()
    batch_log_writer.flush()
    writer.write('[%s] Start iteration\n' % get_current_time())
    batch_log_writer.write('[%s] Start iteration\n' % get_current_time())
    if gpu is not None:
        writer.write("Use GPU: {} for training\n".format(gpu))
        batch_log_writer.write("Use GPU: {} for training\n".format(gpu))
    if args['distributed']:
        # BUGFIX: args is a dict, so attribute access (args.rank) raised
        # AttributeError here; use dict indexing instead.
        if args['dist_url'] == "env://" and args['rank'] == -1:
            args['rank'] = int(os.environ["RANK"])
        if args['multiprocessing_distributed']:
            # For multiprocessing distributed training, rank needs to be the
            # global rank among all the processes: node_rank * gpus + local gpu.
            args['rank'] = args['rank'] * ngpus_per_node + gpu
        dist.init_process_group(backend=args['dist_backend'], init_method=args['dist_url'],
                                world_size=args['world_size'], rank=args['rank'])
    # create model
    if args['pretrained']:
        arch = args['arch']
        writer.write("=> using pre-trained model '{}'\n".format(arch))
        batch_log_writer.write("=> using pre-trained model '{}'".format(arch))
        model = models.__dict__[arch](pretrained=True)
    else:
        arch = args['arch']
        writer.write("=> creating model '{}'\n".format(arch))
        batch_log_writer.write("=> creating model '{}'\n".format(arch))
        model = models.__dict__[arch]()
    if not torch.cuda.is_available():
        print('using CPU, this will be slow')
    elif args['distributed']:
        # For multiprocessing distributed, DistributedDataParallel constructor
        # should always set the single device scope, otherwise,
        # DistributedDataParallel will use all available devices.
        if gpu is not None:
            torch.cuda.set_device(gpu)
            model.cuda(gpu)
            # One GPU per process: split the per-node batch size and workers
            # across the GPUs of the current node.
            ngpus_per_node = args['ngpus_per_node']
            args['batch_size'] = int(args['batch_size'] / ngpus_per_node)
            args['workers'] = int((args['workers'] + ngpus_per_node - 1) / ngpus_per_node)
            model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[gpu])
        else:
            model.cuda()
            # DistributedDataParallel will divide and allocate batch_size to all
            # available GPUs if device_ids are not set
            model = torch.nn.parallel.DistributedDataParallel(model)
    elif gpu is not None:
        torch.cuda.set_device(gpu)
        model = model.cuda(gpu)
    else:
        # DataParallel will divide and allocate batch_size to all available GPUs
        arch = args['arch']
        if arch.startswith('alexnet') or arch.startswith('vgg'):
            model.features = torch.nn.DataParallel(model.features)
            model.cuda()
        else:
            model = torch.nn.DataParallel(model).cuda()
    # define loss function (criterion), optimizer, and learning rate scheduler
    criterion = nn.CrossEntropyLoss().cuda(gpu)
    optimizer = torch.optim.SGD(model.parameters(), args['lr'],
                                momentum=args['momentum'],
                                weight_decay=args['weight_decay'])
    # Sets the learning rate to the initial LR decayed by 10 every 30 epochs.
    scheduler = StepLR(optimizer, step_size=30, gamma=0.1)
    # optionally resume from a checkpoint
    if args['resume']:
        if os.path.isfile(args['resume']):
            writer.write("=> loading checkpoint '{}'\n".format(args['resume']))
            batch_log_writer.write("=> loading checkpoint '{}'".format(args['resume']))
            if gpu is None:
                checkpoint = torch.load(args['resume'])
            else:
                # Map model to be loaded to specified single gpu.
                loc = 'cuda:{}'.format(gpu)
                checkpoint = torch.load(args['resume'], map_location=loc)
            args['start_epoch'] = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            if gpu is not None:
                # best_acc1 may be from a checkpoint from a different GPU
                best_acc1 = best_acc1.to(gpu)
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            scheduler.load_state_dict(checkpoint['scheduler'])
            # BUGFIX: args.resume -> args['resume'] (args is a dict).
            writer.write("=> loaded checkpoint '{}' (epoch {})\n"
                  .format(args['resume'], checkpoint['epoch']))
            batch_log_writer.write("=> loaded checkpoint '{}' (epoch {})"
                  .format(args['resume'], checkpoint['epoch']))
        else:
            writer.write("=> no checkpoint found at '{}'\n".format(args['resume']))
            batch_log_writer.write("=> no checkpoint found at '{}'".format(args['resume']))
    cudnn.benchmark = True
    # Data loading code
    traindir = os.path.join(args['data'], 'train')
    valdir = os.path.join(args['data'], 'val')
    train_tfrecord_file = os.path.join(traindir, "train_clustered.tfrecord")
    train_index_file = os.path.join(traindir, "train_clustered.index")
    val_tfrecord_file = os.path.join(valdir, "val_clustered.tfrecord")
    val_index_file = os.path.join(valdir, "val_clustered.index")
    if args['image_type'] == 'RGB':
        description = {"image": "byte", "label": "int", "index": "int", "width": "int", "height": "int"}
    else:
        description = {"image": "byte", "label": "int", "index": "int"}
    shuffle_mode = args['shuffle_mode']
    # once_shuffle: the image data was shuffled once when the tfrecord was built.
    # block: the image data is clustered; CorgiPile block+buffer shuffle.
    # no_shuffle: the image data is clustered and read sequentially.
    # epoch_shuffle: full random access over the tfrecord each epoch.
    if shuffle_mode == 'block':
        train_dataset = corgipile.dataset.CorgiPileTFRecordDataset(
            train_tfrecord_file,
            train_index_file,
            block_num=args['block_num'],
            buffer_size_ratio=args['buffer_size_ratio'],
            description=description,
            transform=decode_train_raw_image,
            trans_after_buffered=True,
            distributed=args['distributed'])
    elif shuffle_mode == 'no_shuffle' or shuffle_mode == 'once_shuffle':
        train_dataset = corgipile.dataset.SeqTFRecordDataset(
            train_tfrecord_file,
            train_index_file,
            num_workers = args['workers'],
            description=description,
            transform=decode_train_raw_image,
            trans_after_buffered=True,
            distributed=args['distributed'],
            data_partition=True)
    elif shuffle_mode == 'epoch_shuffle':
        train_dataset = corgipile.dataset.RandomAccessTFRecordDataset(
            train_tfrecord_file,
            train_index_file,
            description=description,
            transform=decode_train_raw_image,
            trans_after_buffered=True,
            distributed=args['distributed'])
    else:
        # ROBUSTNESS: an unknown mode previously left train_dataset unbound and
        # failed later with a confusing NameError; fail fast instead.
        raise ValueError('Unsupported shuffle_mode: %r' % shuffle_mode)
    val_dataset = corgipile.dataset.SeqTFRecordDataset(
        val_tfrecord_file,
        val_index_file,
        num_workers = args['workers'],
        description=description,
        transform=decode_val_raw_image,
        trans_after_buffered=True,
        distributed=args['distributed'],
        data_partition=True)
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args['batch_size'], shuffle=False,
        num_workers=args['workers'], pin_memory=True)
    val_loader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=args['batch_size'], shuffle=False,
        num_workers=args['workers'], pin_memory=True)
    if args['evaluate']:
        validate(batch_log_writer, val_loader, model, criterion, args)
        # BUGFIX: close the log files on this early-return path too.
        writer.close()
        batch_log_writer.close()
        return
    avg_exec_t = 0.0
    avg_grad_t = 0.0
    avg_loss_t = 0.0
    first_exec_t = 0.0
    first_grad_t = 0.0
    first_loss_t = 0.0
    second_exec_t = 0.0
    second_grad_t = 0.0
    second_loss_t = 0.0
    max_acc1 = 0.0
    max_acc5 = 0.0
    batch_log_writer.write('[%s] Start training\n' % get_current_time())
    for epoch in range(args['start_epoch'], args['epochs']):
        start = time.time()
        if shuffle_mode == 'block' or shuffle_mode == 'epoch_shuffle':
            train_dataset.set_epoch(epoch)
        with Join([model]):
            # train for one epoch
            train(batch_log_writer, train_loader, model, criterion, optimizer, epoch, args)
        grad_end = time.time()
        # evaluate on validation set
        acc1, acc5, num_val_records = validate(batch_log_writer, val_loader, model, criterion, args)
        loss_end = time.time()
        exec_t = loss_end - start
        grad_t = grad_end - start
        loss_t = exec_t - grad_t
        scheduler.step()
        i = epoch
        avg_exec_t += exec_t
        avg_grad_t += grad_t
        avg_loss_t += loss_t
        # Remember the first two epochs separately so warm-up cost (caching,
        # cudnn autotuning) can be excluded from the averages at the end.
        if i == 0:
            first_exec_t = exec_t
            first_grad_t = grad_t
            first_loss_t = loss_t
        elif i == 1:
            second_exec_t = exec_t
            second_grad_t = grad_t
            second_loss_t = loss_t
        writer.write('[%s] [Epoch %2d] acc1 = %.2f, acc5 = %.2f, exec_t = %.2fs, train_t = %.2fs, val_t = %.2fs, num_record = %d\n' %
            (get_current_time(), i + 1, acc1, acc5, round(exec_t, 2), round(grad_t, 2), round(loss_t, 2), num_val_records))
        writer.flush()
        if acc1 > max_acc1:
            max_acc1 = acc1
        if acc5 > max_acc5:
            max_acc5 = acc5
        # remember best acc@1 and save checkpoint
        is_best = acc1 > best_acc1
        best_acc1 = max(acc1, best_acc1)
        if not args['multiprocessing_distributed'] or (args['multiprocessing_distributed']
                and args['rank'] % ngpus_per_node == 0):
            save_checkpoint({
                'epoch': epoch + 1,
                'arch': args['arch'],
                'state_dict': model.state_dict(),
                'best_acc1': best_acc1,
                'optimizer' : optimizer.state_dict(),
                'scheduler' : scheduler.state_dict()
            }, is_best)
    epoch_num = args['epochs'] - args['start_epoch']
    writer.write('[%s] [Finish] avg_exec_t = %.2fs, avg_train_t = %.2fs, avg_val_t = %.2fs\n' %
        (get_current_time(), avg_exec_t / epoch_num,
         avg_grad_t / epoch_num, avg_loss_t / epoch_num))
    writer.write('\n')
    if epoch_num > 2:
        avg_exec_t -= first_exec_t
        avg_grad_t -= first_grad_t
        avg_loss_t -= first_loss_t
        writer.write('[%s] [-first] avg_exec_t = %.2fs, avg_train_t = %.2fs, avg_val_t = %.2fs\n' %
            (get_current_time(), avg_exec_t / (epoch_num - 1),
             avg_grad_t / (epoch_num - 1), avg_loss_t / (epoch_num - 1)))
        avg_exec_t -= second_exec_t
        avg_grad_t -= second_grad_t
        avg_loss_t -= second_loss_t
        writer.write('[%s] [-1 & 2] avg_exec_t = %.2fs, avg_train_t = %.2fs, avg_val_t = %.2fs\n' %
            (get_current_time(), avg_exec_t / (epoch_num - 2),
             avg_grad_t / (epoch_num - 2), avg_loss_t / (epoch_num - 2)))
    writer.write('[%s] [MaxAcc] max_acc1 = %.2f, max_acc5 = %.2f\n' %
        (get_current_time(), max_acc1, max_acc5))
    writer.close()
    batch_log_writer.close()
def train(writer, train_loader, model, criterion, optimizer, epoch, args):
    """Run one training epoch of `model` over `train_loader`.

    Loss, accuracy and timing statistics are accumulated in AverageMeters and
    a progress line is written to `writer` every args['print_freq'] batches.
    The model and optimizer are updated in place; nothing is returned.
    """
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(
        writer,
        len(train_loader),
        [batch_time, data_time, losses, top1, top5],
        prefix="Epoch: [{}]".format(epoch))
    # switch to train mode
    model.train()
    gpu = args['gpu']
    end = time.time()
    for i, (images, target) in enumerate(train_loader):
        # measure data loading time (time spent waiting on the loader)
        data_time.update(time.time() - end)
        if gpu is not None:
            # non_blocking=True overlaps the host-to-device copy with compute
            images = images.cuda(gpu, non_blocking=True)
        if torch.cuda.is_available():
            target = target.cuda(gpu, non_blocking=True)
        # compute output
        output = model(images)
        loss = criterion(output, target)
        # measure accuracy and record loss (meters weighted by batch size)
        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        losses.update(loss.item(), images.size(0))
        top1.update(acc1[0], images.size(0))
        top5.update(acc5[0], images.size(0))
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # measure elapsed time for the whole batch (data + forward + backward)
        batch_time.update(time.time() - end)
        end = time.time()
        if i % args['print_freq'] == 0:
            progress.display(i)
def validate(writer, val_loader, model, criterion, args):
    """Evaluate `model` on `val_loader` without gradient tracking.

    Returns:
        (top1.avg, top5.avg, num_records): average top-1 / top-5 accuracy
        over the validation set and the total number of samples seen.
    """
    # Time/Loss use Summary.NONE so only the accuracy meters appear in the
    # end-of-validation summary line.
    batch_time = AverageMeter('Time', ':6.3f', Summary.NONE)
    losses = AverageMeter('Loss', ':.4e', Summary.NONE)
    top1 = AverageMeter('Acc@1', ':6.2f', Summary.AVERAGE)
    top5 = AverageMeter('Acc@5', ':6.2f', Summary.AVERAGE)
    progress = ProgressMeter(
        writer,
        len(val_loader),
        [batch_time, losses, top1, top5],
        prefix='Test: ')
    # switch to evaluate mode
    model.eval()
    gpu = args['gpu']
    num_records = 0
    with torch.no_grad():
        end = time.time()
        for i, (images, target) in enumerate(val_loader):
            if gpu is not None:
                images = images.cuda(gpu, non_blocking=True)
            if torch.cuda.is_available():
                target = target.cuda(gpu, non_blocking=True)
            # compute output
            output = model(images)
            loss = criterion(output, target)
            # measure accuracy and record loss
            acc1, acc5 = accuracy(output, target, topk=(1, 5))
            losses.update(loss.item(), images.size(0))
            top1.update(acc1[0], images.size(0))
            top5.update(acc5[0], images.size(0))
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            if i % args['print_freq'] == 0:
                progress.display(i)
            num_records += images.size(0)
    progress.display_summary()
    return (top1.avg, top5.avg, num_records)
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    """Serialize `state` to `filename`; mirror it to 'model_best.pth.tar' when `is_best`."""
    torch.save(state, filename)
    if not is_best:
        return
    shutil.copyfile(filename, 'model_best.pth.tar')
class Summary(Enum):
    """How an AverageMeter renders itself in the end-of-epoch summary line
    (see AverageMeter.summary)."""
    NONE = 0     # meter is omitted from the summary (empty string)
    AVERAGE = 1  # report the running average
    SUM = 2      # report the accumulated sum
    COUNT = 3    # report the (weighted) number of updates
class AverageMeter(object):
    """Tracks the latest value and the running sum/count/average of a metric."""

    def __init__(self, name, fmt=':f', summary_type=Summary.AVERAGE):
        self.name = name
        self.fmt = fmt
        self.summary_type = summary_type
        self.reset()

    def reset(self):
        """Zero out all running statistics."""
        self.val, self.avg, self.sum, self.count = 0, 0, 0, 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def __str__(self):
        template = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
        return template.format(**self.__dict__)

    def summary(self):
        """Render this meter according to its Summary type ('' for NONE)."""
        templates = {
            Summary.NONE: '',
            Summary.AVERAGE: '{name} {avg:.3f}',
            Summary.SUM: '{name} {sum:.3f}',
            Summary.COUNT: '{name} {count:.3f}',
        }
        if self.summary_type not in templates:
            raise ValueError('invalid summary type %r' % self.summary_type)
        return templates[self.summary_type].format(**self.__dict__)
class ProgressMeter(object):
    """Formats timestamped progress and summary lines for a set of meters."""

    def __init__(self, writer, num_batches, meters, prefix=""):
        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
        self.meters = meters
        self.prefix = prefix
        self.writer = writer

    def display(self, batch):
        """Write one tab-separated progress line for batch index `batch`."""
        parts = [self.prefix + self.batch_fmtstr.format(batch)]
        parts.extend(str(m) for m in self.meters)
        self.writer.write('[' + get_current_time() + '] ' + '\t'.join(parts) + '\n')
        self.writer.flush()

    def display_summary(self):
        """Write one line holding every meter's end-of-run summary."""
        parts = [" *"]
        parts.extend(m.summary() for m in self.meters)
        self.writer.write('[' + get_current_time() + '] ' + ' '.join(parts) + '\n')
        self.writer.flush()

    def _get_batch_fmtstr(self, num_batches):
        # Counter field is padded to the digit-width of num_batches,
        # producing e.g. '[{:3d}/100]'.
        digits = len(str(num_batches // 1))
        fmt = '{:' + str(digits) + 'd}'
        return '[' + fmt + '/' + fmt.format(num_batches) + ']'
def accuracy(output, target, topk=(1,)):
    """Return the top-k accuracies (as percentage tensors) for each k in `topk`."""
    with torch.no_grad():
        k_max = max(topk)
        batch = target.size(0)
        # indices of the k_max highest-scoring classes, transposed to (k_max, batch)
        _, top_idx = output.topk(k_max, 1, True, True)
        top_idx = top_idx.t()
        hits = top_idx.eq(target.view(1, -1).expand_as(top_idx))
        return [hits[:k].reshape(-1).float().sum(0, keepdim=True).mul_(100.0 / batch)
                for k in topk]
# decode image
def decode_train_RGB_image(features):
    """Decode a raw-RGB training record into a (tensor, label) pair with
    train-time augmentation (random resized crop + horizontal flip)."""
    w = features["width"][0]
    h = features["height"][0]
    pil_img = Image.fromarray(np.uint8(features["image"].reshape((h, w, 3))))
    # ImageNet channel statistics for normalization
    pipeline = transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])
    return (pipeline(pil_img), features["label"][0])
def decode_train_raw_image(features):
    """Decode a compressed-bytes training record into an augmented
    (tensor, label) pair.

    The record's image/label fields are nulled afterwards (presumably to
    release memory once consumed — TODO confirm with callers).
    """
    pil_img = Image.open(io.BytesIO(features["image"])).convert("RGB")
    pipeline = transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])
    tensor = pipeline(pil_img)
    label = features["label"][0]
    features['image'] = None
    features["label"] = None
    return (tensor, label)
def decode_val_raw_image(features):
    """Decode a compressed-bytes validation record into a (tensor, label) pair
    using the deterministic eval transform (resize + center crop).

    The record's image/label fields are nulled afterwards (presumably to
    release memory once consumed — TODO confirm with callers).
    """
    pil_img = Image.open(io.BytesIO(features["image"])).convert("RGB")
    pipeline = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])
    tensor = pipeline(pil_img)
    label = features["label"][0]
    features['image'] = None
    features["label"] = None
    return (tensor, label)
def decode_val_RGB_image(features):
    """Decode a raw-RGB validation record into a (tensor, label) pair using
    the deterministic eval transform (resize + center crop)."""
    w = features["width"][0]
    h = features["height"][0]
    pil_img = Image.fromarray(np.uint8(features["image"].reshape((h, w, 3))))
    pipeline = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])
    return (pipeline(pil_img), features["label"][0])
def get_current_time():
    """Return the current local time formatted as 'YYYY-MM-DD HH:MM:SS'."""
    now = time.localtime()
    return time.strftime("%Y-%m-%d %H:%M:%S", now)
def get_current_time_filename():
    """Return the current local time formatted for safe use in file names
    ('YYYY-MM-DD-HH-MM-SS')."""
    now = time.localtime()
    return time.strftime("%Y-%m-%d-%H-%M-%S", now)
if __name__ == '__main__':
main() | 30,880 | 35.075935 | 137 | py |
CorgiPile-PyTorch | CorgiPile-PyTorch-main/shuffleformat/corgipile/dataset.py | import typing
import numpy as np
import datetime
import random
import time
import math
import torch.utils.data
import torch.distributed as dist
from shuffleformat.corgipile import block_reader_tfrecord
from shuffleformat.corgipile import block_iterator_utils
from shuffleformat.corgipile import seq_reader_tfrecord
class CorgiPileTFRecordDataset(torch.utils.data.IterableDataset):
    """CorgiPile two-level shuffling over a single TFRecord file.

    The record index is cut into roughly `block_num` contiguous byte-range
    blocks.  Before each epoch the block order is shuffled (`set_epoch`), and
    records streamed from the selected blocks are shuffled again through an
    in-memory buffer of `buffer_size_ratio * num_records` tuples, approximating
    a full shuffle without materializing the whole dataset.

    Fix over the original: the start-up tip message said 'datast' instead of
    'dataset'.
    """

    def __init__(self,
                 data_path: str,
                 index_path: str,
                 block_num: int,
                 buffer_size_ratio: float,
                 description: typing.Union[typing.List[str], typing.Dict[str, str], None] = None,
                 drop_last=False,
                 transform: typing.Callable[[dict], typing.Any] = None,
                 trans_after_buffered=True,
                 distributed=False
                 ) -> None:
        super(CorgiPileTFRecordDataset, self).__init__()
        self.data_path = data_path
        self.index_path = index_path
        # note that this block_num is an indication, the real number is more or less than it.
        self.block_num = block_num
        self.buffer_size_ratio = buffer_size_ratio
        self.description = description
        self.drop_last = drop_last
        self.transform = transform
        self.trans_after_buffered = trans_after_buffered
        self.epoch = 0
        # e.g., [0, 71, 142, 213, 284, 355, 426, 497, 568, 639, 710, 781, 852, 923, 994, 1065, 1136]
        self.block_index_list = self.split_index_to_blocks()
        num_blocks = len(self.block_index_list)
        if dist.is_available() and distributed:
            # Each rank reads the half-open block range [start, end); e.g. with
            # 17 blocks and world_size 4: [0,4), [4,8), [8,12), [12,17).
            world_size = dist.get_world_size()
            rank = dist.get_rank()
            print('world_size = ', world_size, ', rank = ', rank)
            self.start_block_index = (num_blocks * rank) // world_size
            self.end_block_index = (num_blocks * (rank + 1)) // world_size
            if self.end_block_index >= num_blocks:
                self.end_block_index = num_blocks
            # self.buffer_size = int(self.num_records / world_size * self.buffer_size_ratio)
            self.buffer_size = int(self.num_records * self.buffer_size_ratio)
            self.length = int(self.num_records / world_size)
        else:
            self.start_block_index = 0
            self.end_block_index = num_blocks
            self.buffer_size = int(self.num_records * self.buffer_size_ratio)
            self.length = self.num_records
        print("[Tip] Using dataset.set_epoch(epoch) to shuffle the blocks before each epoch.")

    # shuffle the block_index_list before each epoch
    def set_epoch(self, epoch):
        """Deterministically reshuffle the block order for the given epoch."""
        random.seed(epoch)
        random.shuffle(self.block_index_list)

    def split_index_to_blocks(self):
        """Read the index file and partition records into (start_byte, end_byte) blocks.

        Sets self.num_records and recomputes self.block_num so each block holds
        the same number of tuples; the last block's end byte is None ('to EOF').
        """
        start_time = datetime.datetime.now()
        index = np.loadtxt(self.index_path, dtype=np.int64)[:, 0]
        end_time = datetime.datetime.now()
        str_time = '[index loading time] %dms' % ((end_time - start_time).seconds * 1000 + (end_time - start_time).microseconds / 1000)
        print(str_time)
        self.num_records = len(index)
        assert(self.block_num <= self.num_records)
        block_tuple_num = int(self.num_records / self.block_num)
        # re-compute the real block_num
        self.block_num = math.ceil(self.num_records / block_tuple_num)
        block_index_list = []
        for idx in range(0, self.block_num):
            start_index = block_tuple_num * idx
            end_index = block_tuple_num * (idx + 1)
            if end_index < self.num_records:
                end_byte = index[end_index]
            else:
                end_byte = None
            if start_index < self.num_records:
                start_byte = index[start_index]
                # TODO: start_byte and end_type should be Long for very large dataset
                block_index_list.append((start_byte, end_byte))
            else:
                print('ERROR: start_index >= self.num_records!')
        print('[param] num_records = %d, real_block_num = %d, block_tuple_num = %d' % (self.num_records, self.block_num, block_tuple_num))
        return block_index_list

    def __iter__(self):
        """Stream records from this worker's block range, buffer-shuffled."""
        worker_info = torch.utils.data.get_worker_info()
        if worker_info is not None and worker_info.num_workers > 1:
            shard = worker_info.id, worker_info.num_workers
        else:
            shard = None
        if self.trans_after_buffered:
            # transform is applied after buffering, keeping buffer entries small
            dataset_block_iter = block_reader_tfrecord.tfrecord_block_loader(
                self.data_path,
                self.block_index_list,
                self.start_block_index,
                self.end_block_index,
                self.description, transform=None, shard=shard)
        else:
            dataset_block_iter = block_reader_tfrecord.tfrecord_block_loader(
                self.data_path,
                self.block_index_list,
                self.start_block_index,
                self.end_block_index,
                self.description, self.transform, shard)
        if self.buffer_size > 0:
            it = block_iterator_utils.shuffle_iterator(dataset_block_iter, self.buffer_size)
        else:
            it = dataset_block_iter
        if self.trans_after_buffered:
            it = map(self.transform, it)
        return it

    def __len__(self):
        return self.length
'''
For multiple processes (nodes):
(1) Specify `num_workers`, `distributed = True`, and `partition = True`. Each process will load one part of the dataset.
    block_num = num_workers * world_size, start_block_index = block_num * rank // world_size, end_block_index = block_num * (rank + 1) // world_size
(2) Specify `num_workers`, `distributed = True`, and `partition = False`. Each process will load the whole dataset.
    block_num = num_workers, start_block_index = 0, end_block_index = num_workers
For a single process (node):
(1) Specify `num_workers` and `distributed = False`; each data-loading thread will load one part of the dataset.
    block_num = num_workers, start_block_index = 0, end_block_index = num_workers
'''
class SeqTFRecordDataset(torch.utils.data.IterableDataset):
    """Sequential (no-shuffle) TFRecord dataset split into per-worker blocks.

    distributed = True: partition the dataset into different splits for each process.
    num_workers: the number of data-loading threads for each process.

    Fixes over the original: 'euqally' -> 'equally' in the runtime warning
    message; 'differnt' -> 'different' in the class comment.
    """

    def __init__(self,
                 data_path: str,
                 index_path: str,
                 num_workers: int,
                 description: typing.Union[typing.List[str], typing.Dict[str, str], None] = None,
                 drop_last=False,
                 transform: typing.Callable[[dict], typing.Any] = None,
                 trans_after_buffered=True,
                 distributed=False,
                 data_partition=True
                 ) -> None:
        super(SeqTFRecordDataset, self).__init__()
        self.data_path = data_path
        self.index_path = index_path
        self.description = description
        self.drop_last = drop_last
        self.transform = transform
        self.trans_after_buffered = trans_after_buffered
        self.epoch = 0
        if dist.is_available() and distributed:
            world_size = dist.get_world_size()
            rank = dist.get_rank()
            print('world_size = ', world_size, ', rank = ', rank)
            if data_partition:
                # One block per loader thread on every rank; this rank reads
                # the half-open block range [start, end), e.g. world_size 4 and
                # num_workers 2 gives 8 blocks: [0,2), [2,4), [4,6), [6,8).
                self.block_num = world_size * num_workers
                self.block_index_list = self.split_index_to_blocks()
                self.start_block_index = (self.block_num * rank) // world_size
                self.end_block_index = (self.block_num * (rank + 1)) // world_size
                if self.end_block_index >= self.block_num:
                    self.end_block_index = self.block_num
                self.length = int(self.num_records / world_size)
                if self.num_records % world_size != 0:
                    print('[Warning] the number of records', self.num_records, 'cannot be equally divided by the world_size', world_size, '!')
            else:
                # Every process reads the whole dataset, split across threads.
                self.block_num = num_workers
                self.start_block_index = 0
                self.end_block_index = self.block_num
                self.block_index_list = self.split_index_to_blocks()
                self.length = self.num_records
        else:
            self.block_num = num_workers
            self.start_block_index = 0
            self.end_block_index = self.block_num
            self.block_index_list = self.split_index_to_blocks()
            self.length = self.num_records
        assert(len(self.block_index_list) == self.block_num)

    def split_index_to_blocks(self):
        """Read the index file and partition records into (start_byte, end_byte) blocks.

        Sets self.num_records and recomputes self.block_num so each block holds
        the same number of tuples; the last block's end byte is None ('to EOF').
        """
        start_time = datetime.datetime.now()
        index = np.loadtxt(self.index_path, dtype=np.int64)[:, 0]
        end_time = datetime.datetime.now()
        str_time = '[index loading time] %dms' % ((end_time - start_time).seconds * 1000 + (end_time - start_time).microseconds / 1000)
        print(str_time)
        self.num_records = len(index)
        assert(self.block_num <= self.num_records)
        block_tuple_num = int(self.num_records / self.block_num)
        # re-compute the real block_num
        self.block_num = math.ceil(self.num_records / block_tuple_num)
        block_index_list = []
        for idx in range(0, self.block_num):
            start_index = block_tuple_num * idx
            end_index = block_tuple_num * (idx + 1)
            if end_index < self.num_records:
                end_byte = index[end_index]
            else:
                end_byte = None
            if start_index < self.num_records:
                start_byte = index[start_index]
                # TODO: start_byte and end_type should be Long for very large dataset
                block_index_list.append((start_byte, end_byte))
            else:
                print('ERROR: start_index >= self.num_records!')
        print('[param] num_records = %d, real_block_num = %d, block_tuple_num = %d' % (self.num_records, self.block_num, block_tuple_num))
        return block_index_list

    def __iter__(self):
        """Stream records sequentially from this worker's block range."""
        worker_info = torch.utils.data.get_worker_info()
        if worker_info is not None and worker_info.num_workers > 1:
            shard = worker_info.id, worker_info.num_workers
        else:
            shard = None
        if self.trans_after_buffered:
            it = block_reader_tfrecord.tfrecord_block_loader(
                self.data_path,
                self.block_index_list,
                self.start_block_index,
                self.end_block_index,
                self.description, transform=None, shard=shard)
        else:
            it = block_reader_tfrecord.tfrecord_block_loader(
                self.data_path,
                self.block_index_list,
                self.start_block_index,
                self.end_block_index,
                self.description, self.transform, shard)
        if self.trans_after_buffered:
            it = map(self.transform, it)
        return it

    def __len__(self):
        return self.length
class RandomAccessTFRecordDataset(torch.utils.data.IterableDataset):
    """Fully-shuffled TFRecord dataset: one block per record.

    Every record becomes its own (start_byte, end_byte) block, so shuffling
    the block list (`set_epoch`) yields a true random permutation at the cost
    of one random read per record.

    Fixes over the original: 'datast' -> 'dataset' in the tip message;
    commented-out debug prints removed.
    """

    def __init__(self,
                 data_path: str,
                 index_path: str,
                 description: typing.Union[typing.List[str], typing.Dict[str, str], None] = None,
                 drop_last=False,
                 transform: typing.Callable[[dict], typing.Any] = None,
                 trans_after_buffered=True,
                 distributed=False
                 ) -> None:
        super(RandomAccessTFRecordDataset, self).__init__()
        self.data_path = data_path
        self.index_path = index_path
        self.description = description
        self.drop_last = drop_last
        self.transform = transform
        self.trans_after_buffered = trans_after_buffered
        self.epoch = 0
        self.block_index_list = self.split_index_to_blocks()
        num_blocks = len(self.block_index_list)
        if dist.is_available() and distributed:
            # Each rank reads the half-open block range [start, end); e.g. with
            # 17 blocks and world_size 4: [0,4), [4,8), [8,12), [12,17).
            world_size = dist.get_world_size()
            rank = dist.get_rank()
            print('world_size = ', world_size, ', rank = ', rank)
            self.start_block_index = (num_blocks * rank) // world_size
            self.end_block_index = (num_blocks * (rank + 1)) // world_size
            if self.end_block_index >= num_blocks:
                self.end_block_index = num_blocks
            self.length = int(self.num_records / world_size)
        else:
            self.start_block_index = 0
            self.end_block_index = num_blocks
            self.length = self.num_records
        print("[Tip] Using dataset.set_epoch(epoch) to shuffle the blocks before each epoch.")

    # shuffle the block_index_list before each epoch
    def set_epoch(self, epoch):
        """Deterministically reshuffle the per-record block order for the epoch."""
        random.seed(epoch)
        random.shuffle(self.block_index_list)

    def split_index_to_blocks(self):
        """Build one (start_byte, end_byte) block per record for random access."""
        start_time = datetime.datetime.now()
        index = np.loadtxt(self.index_path, dtype=np.int64)[:, 0]
        end_time = datetime.datetime.now()
        str_time = '[index loading time] %dms' % ((end_time - start_time).seconds * 1000 + (end_time - start_time).microseconds / 1000)
        print(str_time)
        self.num_records = len(index)
        self.block_num = self.num_records
        block_tuple_num = 1
        print('[param] block_num = %d, block_tuple_num = %d' % (self.block_num, block_tuple_num))
        block_index_list = []
        for idx in range(0, self.block_num):
            start_index = block_tuple_num * idx
            end_index = block_tuple_num * (idx + 1)
            if end_index < self.num_records:
                end_byte = index[end_index]
            else:
                end_byte = None
            if start_index < self.num_records:
                start_byte = index[start_index]
                # TODO: start_byte and end_type should be Long for very large dataset
                block_index_list.append((start_byte, end_byte))
        return block_index_list

    def __iter__(self):
        """Stream records from this worker's (per-record) block range."""
        worker_info = torch.utils.data.get_worker_info()
        if worker_info is not None and worker_info.num_workers > 1:
            shard = worker_info.id, worker_info.num_workers
        else:
            shard = None
        if self.trans_after_buffered:
            it = block_reader_tfrecord.tfrecord_block_loader(
                self.data_path,
                self.block_index_list,
                self.start_block_index,
                self.end_block_index,
                self.description, transform=None, shard=shard)
        else:
            it = block_reader_tfrecord.tfrecord_block_loader(
                self.data_path,
                self.block_index_list,
                self.start_block_index,
                self.end_block_index,
                self.description, self.transform, shard)
        if self.trans_after_buffered:
            it = map(self.transform, it)
        return it

    def __len__(self):
        return self.length
| 18,063 | 36.168724 | 152 | py |
CorgiPile-PyTorch | CorgiPile-PyTorch-main/shuffleformat/corgipile/block_iterator_utils.py | """Iterator utils."""
from __future__ import division
import typing
import warnings
import random
import datetime
import numpy as np
import torch.distributed as dist
def shuffle_iterator(iterator: typing.Iterator,
                     buffer_size: int) -> typing.Iterable[typing.Any]:
    """Yield all items of `iterator`, shuffled chunk-by-chunk.

    Up to `buffer_size` records are drawn into a buffer, the buffer is
    shuffled and drained, and the process repeats until the iterator is
    exhausted (the final, possibly partial, buffer is shuffled too).
    """
    random.seed()
    while True:
        chunk = []
        exhausted = False
        for _ in range(buffer_size):
            try:
                chunk.append(next(iterator))
            except StopIteration:
                exhausted = True
                break
        if chunk:
            random.shuffle(chunk)
            yield from chunk
        if exhausted:
            return
| 687 | 18.111111 | 70 | py |
CorgiPile-PyTorch | CorgiPile-PyTorch-main/shuffleformat/tfrecord/__init__.py | from shuffleformat.tfrecord import tools
from shuffleformat.tfrecord import torch
from shuffleformat.tfrecord import example_pb2
from shuffleformat.tfrecord import iterator_utils
from shuffleformat.tfrecord import reader
from shuffleformat.tfrecord import writer
from shuffleformat.tfrecord.iterator_utils import *
from shuffleformat.tfrecord.reader import *
from shuffleformat.tfrecord.writer import *
| 405 | 32.833333 | 51 | py |
CorgiPile-PyTorch | CorgiPile-PyTorch-main/shuffleformat/tfrecord/torch/dataset.py | """Load tfrecord files into torch datasets."""
import typing
import numpy as np
import torch.utils.data
from shuffleformat.tfrecord import reader
from shuffleformat.tfrecord import iterator_utils
class TFRecordDataset(torch.utils.data.IterableDataset):
    """Parse (generic) TFRecords dataset into `IterableDataset` object,
    which contain `np.ndarrays`s. By default (when `sequence_description`
    is None), it treats the TFRecords as containing `tf.Example`.
    Otherwise, it assumes it is a `tf.SequenceExample`.

    Params:
    -------
    data_path: str
        The path to the tfrecords file.

    index_path: str or None
        The path to the index file.

    description: list or dict of str, optional, default=None
        List of keys or dict of (key, value) pairs to extract from each
        record. The keys represent the name of the features and the
        values ("byte", "float", or "int") correspond to the data type.
        If dtypes are provided, then they are verified against the
        inferred type for compatibility purposes. If None (default),
        then all features contained in the file are extracted.

    shuffle_queue_size: int, optional, default=None
        Length of buffer. Determines how many records are queued to
        sample from.

    transform : a callable, default = None
        A function that takes in the input `features` i.e the dict
        provided in the description, transforms it and returns a
        desirable output.

    sequence_description: list or dict of str, optional, default=None
        Similar to `description`, but refers to the sequence features
        within a `SequenceExample`. When this field is `None`, then it
        is assumed that an `Example` is being read otherwise, a
        `SequenceExample` is read. If an empty list or dictionary is
        passed, then all features contained in the file are extracted.

    compression_type: str, optional, default=None
        The type of compression used for the tfrecord. Choose either
        'gzip' or None.
    """

    def __init__(self,
                 data_path: str,
                 index_path: typing.Union[str, None],
                 description: typing.Union[typing.List[str], typing.Dict[str, str], None] = None,
                 shuffle_queue_size: typing.Optional[int] = None,
                 transform: typing.Callable[[dict], typing.Any] = None,
                 sequence_description: typing.Union[typing.List[str], typing.Dict[str, str], None] = None,
                 compression_type: typing.Optional[str] = None,
                 ) -> None:
        super(TFRecordDataset, self).__init__()
        self.data_path = data_path
        self.index_path = index_path
        self.description = description
        self.sequence_description = sequence_description
        self.shuffle_queue_size = shuffle_queue_size
        # Defaults to the identity, so self.transform is always truthy and the
        # map() at the end of __iter__ is always applied.
        self.transform = transform or (lambda x: x)
        self.compression_type = compression_type

    def __iter__(self):
        """Build the record iterator, sharded per DataLoader worker."""
        worker_info = torch.utils.data.get_worker_info()
        if worker_info is not None:
            # Each worker reads a distinct shard; reseed NumPy so workers'
            # shuffle buffers are decorrelated.
            shard = worker_info.id, worker_info.num_workers
            np.random.seed(worker_info.seed % np.iinfo(np.uint32).max)
        else:
            shard = None
        it = reader.tfrecord_loader(data_path=self.data_path,
                                    index_path=self.index_path,
                                    description=self.description,
                                    shard=shard,
                                    sequence_description=self.sequence_description,
                                    compression_type=self.compression_type)
        if self.shuffle_queue_size:
            # Approximate shuffling via a bounded buffer of queued records.
            it = iterator_utils.shuffle_iterator(it, self.shuffle_queue_size)
        if self.transform:
            it = map(self.transform, it)
        return it
class MultiTFRecordDataset(torch.utils.data.IterableDataset):
    """Parse multiple (generic) TFRecords datasets into an `IterableDataset`
    object, which contain `np.ndarrays`s.

    Params:
    -------
    data_pattern: str
        Input data path pattern.

    index_pattern: str or None
        Input index path pattern.

    splits: dict
        Dictionary of (key, value) pairs, where the key is used to
        construct the data and index path(s) and the value determines
        the contribution of each split to the batch.

    description: list or dict of str, optional, default=None
        List of keys or dict of (key, value) pairs to extract from each
        record. The keys represent the name of the features and the
        values ("byte", "float", or "int") correspond to the data type.
        If dtypes are provided, then they are verified against the
        inferred type for compatibility purposes. If None (default),
        then all features contained in the file are extracted.

    shuffle_queue_size: int, optional, default=None
        Length of buffer. Determines how many records are queued to
        sample from.

    transform : a callable, default = None
        A function that takes in the input `features` i.e the dict
        provided in the description, transforms it and returns a
        desirable output.

    sequence_description: list or dict of str, optional, default=None
        Similar to `description`, but refers to the sequence features
        within a `SequenceExample`. When this field is `None`, then it
        is assumed that an `Example` is being read otherwise, a
        `SequenceExample` is read. If an empty list or dictionary is
        passed, then all features contained in the file are extracted.

    compression_type: str, optional, default=None
        The type of compression used for the tfrecord. Choose either
        'gzip' or None.

    infinite: bool, optional, default=True
        Whether the Dataset should be infinite or not
    """

    def __init__(self,
                 data_pattern: str,
                 index_pattern: typing.Union[str, None],
                 splits: typing.Dict[str, float],
                 description: typing.Union[typing.List[str], typing.Dict[str, str], None] = None,
                 shuffle_queue_size: typing.Optional[int] = None,
                 transform: typing.Callable[[dict], typing.Any] = None,
                 sequence_description: typing.Union[typing.List[str], typing.Dict[str, str], None] = None,
                 compression_type: typing.Optional[str] = None,
                 infinite: bool = True
                 ) -> None:
        super(MultiTFRecordDataset, self).__init__()
        self.data_pattern = data_pattern
        self.index_pattern = index_pattern
        self.splits = splits
        self.description = description
        self.sequence_description = sequence_description
        self.shuffle_queue_size = shuffle_queue_size
        # NOTE: unlike TFRecordDataset, transform is NOT defaulted to the
        # identity here, so the `if self.transform` check below is meaningful.
        self.transform = transform
        self.compression_type = compression_type
        self.infinite = infinite

    def __iter__(self):
        """Build the multi-file record iterator for this DataLoader worker."""
        worker_info = torch.utils.data.get_worker_info()
        if worker_info is not None:
            # Reseed NumPy per worker so split sampling/shuffling differs
            # between workers. NOTE(review): no shard is computed here —
            # presumably the multi-loader samples records itself; confirm
            # whether workers can emit duplicate records.
            np.random.seed(worker_info.seed % np.iinfo(np.uint32).max)
        it = reader.multi_tfrecord_loader(data_pattern=self.data_pattern,
                                          index_pattern=self.index_pattern,
                                          splits=self.splits,
                                          description=self.description,
                                          sequence_description=self.sequence_description,
                                          compression_type=self.compression_type,
                                          infinite=self.infinite,
                                          )
        if self.shuffle_queue_size:
            it = iterator_utils.shuffle_iterator(it, self.shuffle_queue_size)
        if self.transform:
            it = map(self.transform, it)
        return it
| 7,917 | 42.505495 | 106 | py |
CorgiPile-PyTorch | CorgiPile-PyTorch-main/shuffleformat/tfrecord/torch/__init__.py | from shuffleformat.tfrecord.torch import dataset
from shuffleformat.tfrecord.torch.dataset import TFRecordDataset
from shuffleformat.tfrecord.torch.dataset import MultiTFRecordDataset
| 185 | 36.2 | 69 | py |
CorgiPile-PyTorch | CorgiPile-PyTorch-main/nlpformat/in_mem_sliding_window/dataset.py | import numpy as np
import warnings
import random
import time
import os
import torch.utils.data
from nlpformat.loader import nlp_format_dataloader
class InMemSlidingWindowDocDataset(torch.utils.data.IterableDataset):
    """In-memory document dataset yielding records through a sliding shuffle window.

    The whole split is loaded into RAM; iteration streams records through a
    random-replacement buffer of `sliding_window_size_ratio * num_records`
    entries, approximating a shuffle without a full permutation.

    Fix over the original: the buffer fill was guarded by
    `except StopIteration`, which list indexing never raises, so the
    "window larger than dataset" warning could never fire and an oversized
    window crashed with an uncaught IndexError. The window is now clamped
    to the buffer length and the warning actually emitted.
    """

    def __init__(self,
                 data_folder: str,
                 split: str,
                 use_clustered_data: bool,
                 sliding_window_size_ratio: float
                 ) -> None:
        super(InMemSlidingWindowDocDataset, self).__init__()
        self.data_folder = data_folder
        self.use_clustered_data = use_clustered_data
        split = split.upper()
        assert split in {'TRAIN', 'TEST'}
        self.split = split
        self.data_buffer = []
        self.sliding_window_size_ratio = sliding_window_size_ratio
        self.__load_data__()
        self.shuffle_window_size = int(self.sliding_window_size_ratio * self.num_records)

    def __load_data__(self):
        """Load the serialized split into self.data_buffer (optionally label-sorted)."""
        print('[%s] Start loading data into memory' % nlp_format_dataloader.get_current_time())
        load_start_time = time.time()
        self.data = torch.load(os.path.join(self.data_folder, self.split + '_data.pth.tar'))
        self.num_records = len(self.data['labels'])
        if (self.use_clustered_data):
            # sort by label to emulate a clustered (worst-case) data layout
            zipped = zip(self.data['docs'], self.data['sentences_per_document'],
                         self.data['words_per_sentence'], self.data['labels'])
            sort_zipped = sorted(zipped, key=lambda x: (x[3]))
            self.data['docs'], self.data['sentences_per_document'], self.data['words_per_sentence'], self.data['labels'] = zip(*sort_zipped)
        for i in range(0, self.num_records):
            self.data_buffer.append((self.data['docs'][i], self.data['sentences_per_document'][i], self.data['words_per_sentence'][i], self.data['labels'][i]))
        load_end_time = time.time()
        print("[%s] data_load_time = %.2fs" % (nlp_format_dataloader.get_current_time(), (load_end_time - load_start_time)))

    def __iter__(self):
        it = self.in_mem_sliding_window_iterator()
        it = map(self.transform_doc, it)
        return it

    def __len__(self):
        return self.num_records

    def in_mem_sliding_window_iterator(self):
        """Yield raw record tuples in sliding-window-shuffled order."""
        buffer_size = min(self.shuffle_window_size, len(self.data_buffer))
        if buffer_size < self.shuffle_window_size:
            warnings.warn("Number of elements in the iterator is less than the "
                          f"queue size (N={self.shuffle_window_size}).")
        buffer = [self.data_buffer[i] for i in range(buffer_size)]
        # replacement phase: emit a random buffered item, slide the next record in
        for i in range(buffer_size, self.num_records):
            index = random.randint(0, buffer_size - 1)
            item = buffer[index]
            buffer[index] = self.data_buffer[i]
            yield item
        # drain phase: flush the remaining buffer in random order
        random.shuffle(buffer)
        for item in buffer:
            yield item

    def transform_doc(self, doc_item):
        """Convert a raw (doc, n_sentences, words_per_sentence, label) tuple to LongTensors."""
        return torch.LongTensor(doc_item[0]), \
               torch.LongTensor([doc_item[1]]), \
               torch.LongTensor(doc_item[2]), \
               torch.LongTensor([doc_item[3]])
class InMemSlidingWindowSentDataset(torch.utils.data.IterableDataset):
    """In-memory sentence dataset yielding records through a sliding shuffle window.

    The whole split is loaded into RAM; iteration streams records through a
    random-replacement buffer of `sliding_window_size_ratio * num_records`
    entries, approximating a shuffle without a full permutation.

    Fix over the original: the buffer fill was guarded by
    `except StopIteration`, which list indexing never raises, so the
    "window larger than dataset" warning could never fire and an oversized
    window crashed with an uncaught IndexError. The window is now clamped
    to the buffer length and the warning actually emitted.
    """

    def __init__(self,
                 data_folder: str,
                 split: str,
                 use_clustered_data: bool,
                 sliding_window_size_ratio: float
                 ) -> None:
        super(InMemSlidingWindowSentDataset, self).__init__()
        self.data_folder = data_folder
        self.use_clustered_data = use_clustered_data
        split = split.upper()
        assert split in {'TRAIN', 'TEST'}
        self.split = split
        self.data_buffer = []
        self.sliding_window_size_ratio = sliding_window_size_ratio
        self.__load_data__()
        self.shuffle_window_size = int(self.sliding_window_size_ratio * self.num_records)

    def __load_data__(self):
        """Load the serialized split into self.data_buffer (optionally label-sorted)."""
        print('[%s] Start loading data into memory' % nlp_format_dataloader.get_current_time())
        load_start_time = time.time()
        self.data = torch.load(os.path.join(self.data_folder, self.split + '_data.pth.tar'))
        self.num_records = len(self.data['labels'])
        if (self.use_clustered_data):
            # sort by label to emulate a clustered (worst-case) data layout
            zipped = zip(self.data['sents'],
                         self.data['words_per_sentence'], self.data['labels'])
            sort_zipped = sorted(zipped, key=lambda x: (x[2]))
            self.data['sents'], self.data['words_per_sentence'], self.data['labels'] = zip(*sort_zipped)
        for i in range(0, self.num_records):
            self.data_buffer.append((self.data['sents'][i], self.data['words_per_sentence'][i], self.data['labels'][i]))
        load_end_time = time.time()
        print("[%s] data_load_time = %.2fs" % (nlp_format_dataloader.get_current_time(), (load_end_time - load_start_time)))

    def __iter__(self):
        it = self.in_mem_sliding_window_iterator()
        it = map(self.transform_sent, it)
        return it

    def __len__(self):
        return self.num_records

    def in_mem_sliding_window_iterator(self):
        """Yield raw record tuples in sliding-window-shuffled order."""
        buffer_size = min(self.shuffle_window_size, len(self.data_buffer))
        if buffer_size < self.shuffle_window_size:
            warnings.warn("Number of elements in the iterator is less than the "
                          f"queue size (N={self.shuffle_window_size}).")
        buffer = [self.data_buffer[i] for i in range(buffer_size)]
        # replacement phase: emit a random buffered item, slide the next record in
        for i in range(buffer_size, self.num_records):
            index = random.randint(0, buffer_size - 1)
            item = buffer[index]
            buffer[index] = self.data_buffer[i]
            yield item
        # drain phase: flush the remaining buffer in random order
        random.shuffle(buffer)
        for item in buffer:
            yield item

    def transform_sent(self, doc_item):
        """Convert a raw (sentence, words_per_sentence, label) tuple to LongTensors."""
        return torch.LongTensor(doc_item[0]), \
               torch.LongTensor([doc_item[1]]), \
               torch.LongTensor([doc_item[2]])
| 6,152 | 32.622951 | 159 | py |
CorgiPile-PyTorch | CorgiPile-PyTorch-main/nlpformat/in_mem_block/block_dataset.py | """Load tfrecord files into torch datasets."""
import typing
import numpy as np
import datetime
import random
import time
import os
import torch.utils.data
from nlpformat.loader import nlp_format_dataloader
from nlpformat.in_mem_block import block_iterator_utils
class InMemBlockDocDataset(torch.utils.data.IterableDataset):
    """In-memory document dataset with CorgiPile-style block+tuple shuffling.

    The split is loaded into memory and partitioned into ``block_num``
    contiguous index blocks. Each epoch the block visiting order is
    shuffled, and ``block_iterator_utils.shuffle_iterator`` adds a second,
    tuple-level shuffle inside a buffer of ``buffer_size_ratio *
    num_records`` records.
    """

    def __init__(self,
                 data_folder: str,
                 split: str,
                 use_clustered_data: bool,
                 block_num: int,
                 buffer_size_ratio: float,
                 drop_last=False
                 ) -> None:
        super(InMemBlockDocDataset, self).__init__()
        self.data_folder = data_folder
        self.use_clustered_data = use_clustered_data
        split = split.upper()
        assert split in {'TRAIN', 'TEST'}
        self.split = split
        self.data_buffer = []
        self.block_num = block_num
        self.__load_data__()
        self.buffer_size = int(self.num_records * buffer_size_ratio)
        self.drop_last = drop_last
        self.block_index_list = self.split_index_to_blocks()

    def __load_data__(self):
        """Load TRAIN/TEST tensors; optionally label-sort to emulate clustered storage."""
        print('[%s] Start loading data into memory' % nlp_format_dataloader.get_current_time())
        load_start_time = time.time()
        self.data = torch.load(os.path.join(self.data_folder, self.split + '_data.pth.tar'))
        self.num_records = len(self.data['labels'])
        if self.use_clustered_data:
            zipped = zip(self.data['docs'], self.data['sentences_per_document'],
                         self.data['words_per_sentence'], self.data['labels'])
            sort_zipped = sorted(zipped, key=lambda x: x[3])
            self.data['docs'], self.data['sentences_per_document'], self.data['words_per_sentence'], self.data['labels'] = zip(*sort_zipped)
        for i in range(0, self.num_records):
            self.data_buffer.append((self.data['docs'][i], self.data['sentences_per_document'][i], self.data['words_per_sentence'][i], self.data['labels'][i]))
        load_end_time = time.time()
        print("[%s] data_load_time = %.2fs" % (nlp_format_dataloader.get_current_time(), (load_end_time - load_start_time)))

    def split_index_to_blocks(self):
        """Partition [0, num_records) into ``block_num`` contiguous (start, end) ranges.

        Fix vs. original: the last block is extended to ``num_records`` so
        the ``num_records % block_num`` trailing records are no longer
        silently dropped from every epoch.
        """
        assert self.block_num < self.num_records
        block_tuple_num = int(self.num_records / self.block_num)
        print('[param] block_num = %d, block_tuple_num = %d' % (self.block_num, block_tuple_num))
        # store index_id instead of file_offset
        block_index_list = []
        for idx in range(0, self.block_num):
            start_index = block_tuple_num * idx
            end_index = block_tuple_num * (idx + 1)
            if idx == self.block_num - 1 or end_index > self.num_records:
                # Last block absorbs the remainder records.
                end_index = self.num_records
            if start_index < self.num_records:
                block_index_list.append((start_index, end_index))
        return block_index_list

    def __iter__(self):
        it = self.in_mem_block_iterator()
        it = block_iterator_utils.shuffle_iterator(it, self.buffer_size)
        it = map(self.transform_doc_item, it)
        return it

    def in_mem_block_iterator(self):
        """Yield raw tuples with the block visiting order re-shuffled per epoch."""
        random.shuffle(self.block_index_list)
        for block_index in self.block_index_list:
            for data_index in range(block_index[0], block_index[1]):
                yield self.data_buffer[data_index]

    def __len__(self):
        return self.num_records

    def transform_doc_item(self, doc_item):
        """Convert a (doc, n_sents, words_per_sent, label) tuple into LongTensors."""
        return torch.LongTensor(doc_item[0]), \
               torch.LongTensor([doc_item[1]]), \
               torch.LongTensor(doc_item[2]), \
               torch.LongTensor([doc_item[3]])
class InMemBlockSentDataset(torch.utils.data.IterableDataset):
    """In-memory sentence dataset with CorgiPile-style block+tuple shuffling.

    Same scheme as the document variant: records are partitioned into
    ``block_num`` contiguous blocks, the block order is shuffled each
    epoch, and a bounded buffer re-shuffles individual tuples.
    """

    def __init__(self,
                 data_folder: str,
                 split: str,
                 use_clustered_data: bool,
                 block_num: int,
                 buffer_size_ratio: float,
                 drop_last=False
                 ) -> None:
        super(InMemBlockSentDataset, self).__init__()
        self.data_folder = data_folder
        self.use_clustered_data = use_clustered_data
        split = split.upper()
        assert split in {'TRAIN', 'TEST'}
        self.split = split
        self.data_buffer = []
        self.block_num = block_num
        self.__load_data__()
        self.buffer_size = int(self.num_records * buffer_size_ratio)
        self.drop_last = drop_last
        self.block_index_list = self.split_index_to_blocks()

    def __load_data__(self):
        """Load TRAIN/TEST tensors; optionally label-sort to emulate clustered storage."""
        print('[%s] Start loading data into memory' % nlp_format_dataloader.get_current_time())
        load_start_time = time.time()
        self.data = torch.load(os.path.join(self.data_folder, self.split + '_data.pth.tar'))
        self.num_records = len(self.data['labels'])
        if self.use_clustered_data:
            zipped = zip(self.data['sents'],
                         self.data['words_per_sentence'], self.data['labels'])
            sort_zipped = sorted(zipped, key=lambda x: x[2])
            self.data['sents'], self.data['words_per_sentence'], self.data['labels'] = zip(*sort_zipped)
        for i in range(0, self.num_records):
            self.data_buffer.append((self.data['sents'][i], self.data['words_per_sentence'][i], self.data['labels'][i]))
        load_end_time = time.time()
        print("[%s] data_load_time = %.2fs" % (nlp_format_dataloader.get_current_time(), (load_end_time - load_start_time)))

    def split_index_to_blocks(self):
        """Partition [0, num_records) into ``block_num`` contiguous (start, end) ranges.

        Fix vs. original: the last block is extended to ``num_records`` so
        the ``num_records % block_num`` trailing records are no longer
        silently dropped from every epoch.
        """
        assert self.block_num < self.num_records
        block_tuple_num = int(self.num_records / self.block_num)
        print('[param] block_num = %d, block_tuple_num = %d' % (self.block_num, block_tuple_num))
        # store index_id instead of file_offset
        block_index_list = []
        for idx in range(0, self.block_num):
            start_index = block_tuple_num * idx
            end_index = block_tuple_num * (idx + 1)
            if idx == self.block_num - 1 or end_index > self.num_records:
                # Last block absorbs the remainder records.
                end_index = self.num_records
            if start_index < self.num_records:
                block_index_list.append((start_index, end_index))
        return block_index_list

    def __iter__(self):
        it = self.in_mem_block_iterator()
        it = block_iterator_utils.shuffle_iterator(it, self.buffer_size)
        it = map(self.transform_sent_item, it)
        return it

    def in_mem_block_iterator(self):
        """Yield raw tuples with the block visiting order re-shuffled per epoch."""
        random.shuffle(self.block_index_list)
        for block_index in self.block_index_list:
            for data_index in range(block_index[0], block_index[1]):
                yield self.data_buffer[data_index]

    def __len__(self):
        return self.num_records

    def transform_sent_item(self, doc_item):
        """Convert a (sentence, word-count, label) tuple into LongTensors."""
        return torch.LongTensor(doc_item[0]), \
               torch.LongTensor([doc_item[1]]), \
               torch.LongTensor([doc_item[2]])
| 7,594 | 33.522727 | 159 | py |
CorgiPile-PyTorch | CorgiPile-PyTorch-main/nlpformat/in_mem_bismarck/dataset.py | """Load tfrecord files into torch datasets."""
import numpy as np
import os
import pickle
import time
import torch.utils.data
from nlpformat.loader import nlp_format_dataloader
from nlpformat.in_mem_bismarck import iterator_utils
class InMemBismarckDocDataset(torch.utils.data.IterableDataset):
    """In-memory document dataset using Bismarck-style buffered shuffling.

    The full split is loaded into ``data_buffer``; iteration routes it
    through ``iterator_utils.shuffle_iterator`` together with an I/O
    buffer and an "old buffer" that is checkpointed to disk with pickle
    between epochs (the buffer scheme from Bismarck's SIGMOD paper, per
    the comment below).
    """
    def __init__(self,
                 data_folder: str,
                 split: str,
                 use_clustered_data: bool,
                 bismarck_buffer_size_ratio: float,
                 select_ratio_from_old_buffer: float,
                 old_buffer_checkpoint_dir: str
                 ) -> None:
        super(InMemBismarckDocDataset, self).__init__()
        self.data_folder = data_folder
        self.use_clustered_data = use_clustered_data
        split = split.upper()
        assert split in {'TRAIN', 'TEST'}
        self.split = split
        self.data_buffer = []
        self.bismarck_buffer_size_ratio = bismarck_buffer_size_ratio
        self.select_ratio_from_old_buffer = select_ratio_from_old_buffer
        self.__load_data__()
        self.total_records_num = len(self.data_buffer)
        #self.num_records_from_old_buffer = self.total_records_num * self.select_ratio_on_old_buffer
        # NOTE(review): a small ratio on a small split can make this 0 and
        # trip the assert below — confirm intended minimum ratio.
        self.io_buffer_size = int(self.total_records_num * bismarck_buffer_size_ratio)
        assert(self.io_buffer_size > 0)
        # buffer in the memory worker in Bismarck's SIGMOD paper
        self.old_buffer = []
        self.io_buffer = []
        self.old_buffer_checkpoint_dir = old_buffer_checkpoint_dir
        if self.old_buffer_checkpoint_dir:
            # Start from a clean slate: drop stale checkpoints of previous runs.
            self.delete_old_buffer()
    def __iter__(self):
        """Build one epoch's iterator: restore the old buffer from its
        checkpoint, open a writer for the new checkpoint, then shuffle and
        tensorize the record stream."""
        it = self.in_mem_bismarck_loader()
        # NOTE(review): ``id`` shadows the builtin; appears to be a fixed
        # checkpoint slot number.
        id = 1
        self.load_old_buffer(id)
        file_writer = self.old_buffer_writer(id)
        it = iterator_utils.shuffle_iterator(it,
                                             self.io_buffer, self.io_buffer_size,
                                             self.total_records_num,
                                             self.old_buffer,
                                             self.select_ratio_from_old_buffer,
                                             file_writer)
        it = map(self.transform_doc, it)
        return it
    def load_old_buffer(self, id):
        """Restore ``self.old_buffer`` from the pickled checkpoint ``id`` if it exists."""
        old_buffer_file = os.path.join(self.old_buffer_checkpoint_dir, '_old_buffer_' + str(id) +'.dat')
        if os.path.exists(old_buffer_file):
            file = open(old_buffer_file, 'rb')
            self.old_buffer = pickle.load(file)
            #self.io_buffer = pickle.load(file)
            file.close()
    def old_buffer_writer(self, id):
        """Open (creating the directory if needed) and return a binary writer
        for checkpoint ``id``; the caller (shuffle_iterator) is expected to
        write to and close it."""
        if not os.path.exists(self.old_buffer_checkpoint_dir):
            os.makedirs(self.old_buffer_checkpoint_dir)
        old_buffer_file = os.path.join(self.old_buffer_checkpoint_dir, '_old_buffer_' + str(id) +'.dat')
        file_writer = open(old_buffer_file, 'wb')
        return file_writer
    def delete_old_buffer(self):
        """Delete every ``_old_buffer_*.dat`` checkpoint in the checkpoint dir."""
        if os.path.exists(self.old_buffer_checkpoint_dir):
            files = os.listdir(self.old_buffer_checkpoint_dir)
            for file in files:
                if file.startswith('_old_buffer_') and file.endswith('.dat'):
                    os.remove(os.path.join(self.old_buffer_checkpoint_dir, file))
    def transform_doc(self, doc_item):
        """Convert a (doc, n_sents, words_per_sent, label) tuple into LongTensors."""
        return torch.LongTensor(doc_item[0]), \
                torch.LongTensor([doc_item[1]]), \
                torch.LongTensor(doc_item[2]), \
                torch.LongTensor([doc_item[3]])
    def __len__(self):
        return self.total_records_num
    def __load_data__(self):
        """Load TRAIN/TEST tensors into ``data_buffer``; optionally label-sort
        to emulate clustered storage."""
        print('[%s] Start loading data into memory' % nlp_format_dataloader.get_current_time())
        load_start_time = time.time()
        self.data = torch.load(os.path.join(self.data_folder, self.split + '_data.pth.tar'))
        self.num_records = len(self.data['labels'])
        if (self.use_clustered_data):
            zipped = zip(self.data['docs'], self.data['sentences_per_document'],
                        self.data['words_per_sentence'], self.data['labels'])
            sort_zipped = sorted(zipped, key=lambda x:(x[3]))
            self.data['docs'], self.data['sentences_per_document'], self.data['words_per_sentence'], self.data['labels'] = zip(*sort_zipped)
        for i in range(0, self.num_records):
            self.data_buffer.append((self.data['docs'][i], self.data['sentences_per_document'][i], self.data['words_per_sentence'][i], self.data['labels'][i]))
        load_end_time = time.time()
        print("[%s] data_load_time = %.2fs" % (nlp_format_dataloader.get_current_time(), (load_end_time - load_start_time)))
    def in_mem_bismarck_loader(self):
        """Yield raw records in storage order; shuffling happens downstream."""
        for data_index in range(0, self.total_records_num):
            yield self.data_buffer[data_index]
class InMemBismarckSentDataset(torch.utils.data.IterableDataset):
    """In-memory sentence dataset using Bismarck-style buffered shuffling.

    The full split is loaded into ``data_buffer``; iteration routes it
    through ``iterator_utils.shuffle_iterator`` together with an I/O
    buffer and an "old buffer" checkpointed to disk with pickle between
    epochs. This is the sentence-level counterpart of
    ``InMemBismarckDocDataset``. (Fix vs. original: the last source line
    had extraction junk fused onto it, which broke the file's syntax.)
    """

    def __init__(self,
                 data_folder: str,
                 split: str,
                 use_clustered_data: bool,
                 bismarck_buffer_size_ratio: float,
                 select_ratio_from_old_buffer: float,
                 old_buffer_checkpoint_dir: str
                 ) -> None:
        super(InMemBismarckSentDataset, self).__init__()
        self.data_folder = data_folder
        self.use_clustered_data = use_clustered_data
        split = split.upper()
        assert split in {'TRAIN', 'TEST'}
        self.split = split
        self.data_buffer = []
        self.bismarck_buffer_size_ratio = bismarck_buffer_size_ratio
        self.select_ratio_from_old_buffer = select_ratio_from_old_buffer
        self.__load_data__()
        self.total_records_num = len(self.data_buffer)
        self.io_buffer_size = int(self.total_records_num * bismarck_buffer_size_ratio)
        assert self.io_buffer_size > 0
        # buffer in the memory worker in Bismarck's SIGMOD paper
        self.old_buffer = []
        self.io_buffer = []
        self.old_buffer_checkpoint_dir = old_buffer_checkpoint_dir
        if self.old_buffer_checkpoint_dir:
            # Start from a clean slate: drop stale checkpoints of previous runs.
            self.delete_old_buffer()

    def __iter__(self):
        """Restore the old-buffer checkpoint, open the writer for the next
        one, and return the shuffled, tensorized record stream."""
        it = self.in_mem_bismarck_loader()
        buf_id = 1  # fixed checkpoint slot
        self.load_old_buffer(buf_id)
        file_writer = self.old_buffer_writer(buf_id)
        it = iterator_utils.shuffle_iterator(it,
                                             self.io_buffer, self.io_buffer_size,
                                             self.total_records_num,
                                             self.old_buffer,
                                             self.select_ratio_from_old_buffer,
                                             file_writer)
        it = map(self.transform_sent, it)
        return it

    def load_old_buffer(self, id):
        """Restore ``self.old_buffer`` from the pickled checkpoint ``id`` if present."""
        old_buffer_file = os.path.join(self.old_buffer_checkpoint_dir, '_old_buffer_' + str(id) + '.dat')
        if os.path.exists(old_buffer_file):
            with open(old_buffer_file, 'rb') as file:
                self.old_buffer = pickle.load(file)

    def old_buffer_writer(self, id):
        """Open (creating the directory if needed) and return a binary writer
        for checkpoint ``id``; the shuffle iterator writes to and closes it."""
        if not os.path.exists(self.old_buffer_checkpoint_dir):
            os.makedirs(self.old_buffer_checkpoint_dir)
        old_buffer_file = os.path.join(self.old_buffer_checkpoint_dir, '_old_buffer_' + str(id) + '.dat')
        file_writer = open(old_buffer_file, 'wb')
        return file_writer

    def delete_old_buffer(self):
        """Delete every ``_old_buffer_*.dat`` checkpoint in the checkpoint dir."""
        if os.path.exists(self.old_buffer_checkpoint_dir):
            files = os.listdir(self.old_buffer_checkpoint_dir)
            for file in files:
                if file.startswith('_old_buffer_') and file.endswith('.dat'):
                    os.remove(os.path.join(self.old_buffer_checkpoint_dir, file))

    def transform_sent(self, doc_item):
        """Convert a (sentence, word-count, label) tuple into LongTensors."""
        return torch.LongTensor(doc_item[0]), \
               torch.LongTensor([doc_item[1]]), \
               torch.LongTensor([doc_item[2]])

    def __len__(self):
        return self.total_records_num

    def __load_data__(self):
        """Load TRAIN/TEST tensors into ``data_buffer``; optionally label-sort
        to emulate clustered storage."""
        print('[%s] Start loading data into memory' % nlp_format_dataloader.get_current_time())
        load_start_time = time.time()
        self.data = torch.load(os.path.join(self.data_folder, self.split + '_data.pth.tar'))
        self.num_records = len(self.data['labels'])
        if self.use_clustered_data:
            zipped = zip(self.data['sents'],
                         self.data['words_per_sentence'], self.data['labels'])
            sort_zipped = sorted(zipped, key=lambda x: x[2])
            self.data['sents'], self.data['words_per_sentence'], self.data['labels'] = zip(*sort_zipped)
        for i in range(0, self.num_records):
            self.data_buffer.append((self.data['sents'][i], self.data['words_per_sentence'][i], self.data['labels'][i]))
        load_end_time = time.time()
        print("[%s] data_load_time = %.2fs" % (nlp_format_dataloader.get_current_time(), (load_end_time - load_start_time)))

    def in_mem_bismarck_loader(self):
        """Yield raw records in storage order; shuffling happens downstream."""
        for data_index in range(0, self.total_records_num):
            yield self.data_buffer[data_index]
CorgiPile-PyTorch | CorgiPile-PyTorch-main/nlpformat/in_mem_once_fully_shuffle/dataset.py | import numpy as np
import random
import time
import os
import torch.utils.data
from nlpformat.loader import nlp_format_dataloader
class InMemOnceFullyShuffleDocDataset(torch.utils.data.Dataset):
    """Map-style document dataset that loads the whole split into memory
    and fully shuffles it once at load time; access is then by index."""

    def __init__(self,
                 data_folder: str, split: str,
                 use_clustered_data: bool
                 ) -> None:
        super(InMemOnceFullyShuffleDocDataset, self).__init__()
        self.data_folder = data_folder
        self.use_clustered_data = use_clustered_data
        split = split.upper()
        assert split in {'TRAIN', 'TEST'}
        self.split = split
        self.data_buffer = []
        self.__load_data__()
        self.num_records = len(self.data_buffer['labels'])

    def __getitem__(self, idx):
        buf = self.data_buffer
        return (torch.LongTensor(buf['docs'][idx]),
                torch.LongTensor([buf['sentences_per_document'][idx]]),
                torch.LongTensor(buf['words_per_sentence'][idx]),
                torch.LongTensor([buf['labels'][idx]]))

    def __load_data__(self):
        """Load the split and shuffle all four parallel columns in unison."""
        print('[%s] Start loading data into memory' % nlp_format_dataloader.get_current_time())
        t_start = time.time()
        self.data_buffer = torch.load(os.path.join(self.data_folder, self.split + '_data.pth.tar'))
        records = list(zip(self.data_buffer['docs'],
                           self.data_buffer['sentences_per_document'],
                           self.data_buffer['words_per_sentence'],
                           self.data_buffer['labels']))
        random.shuffle(records)
        (self.data_buffer['docs'],
         self.data_buffer['sentences_per_document'],
         self.data_buffer['words_per_sentence'],
         self.data_buffer['labels']) = zip(*records)
        t_end = time.time()
        print("[%s] data_load_time = %.2fs" % (nlp_format_dataloader.get_current_time(), (t_end - t_start)))

    def __len__(self):
        return self.num_records
class InMemOnceFullyShuffleSentDataset(torch.utils.data.Dataset):
    """Map-style sentence dataset that loads the whole split into memory
    and fully shuffles it once at load time; access is then by index."""

    def __init__(self,
                 data_folder: str, split: str,
                 use_clustered_data: bool
                 ) -> None:
        super(InMemOnceFullyShuffleSentDataset, self).__init__()
        self.data_folder = data_folder
        self.use_clustered_data = use_clustered_data
        split = split.upper()
        assert split in {'TRAIN', 'TEST'}
        self.split = split
        self.data_buffer = []
        self.__load_data__()
        self.num_records = len(self.data_buffer['labels'])

    def __getitem__(self, i):
        buf = self.data_buffer
        return (torch.LongTensor(buf['sents'][i]),
                torch.LongTensor([buf['words_per_sentence'][i]]),
                torch.LongTensor([buf['labels'][i]]))

    def __load_data__(self):
        """Load the split and shuffle all three parallel columns in unison."""
        print('[%s] Start loading data into memory' % nlp_format_dataloader.get_current_time())
        t_start = time.time()
        self.data_buffer = torch.load(os.path.join(self.data_folder, self.split + '_data.pth.tar'))
        records = list(zip(self.data_buffer['sents'],
                           self.data_buffer['words_per_sentence'],
                           self.data_buffer['labels']))
        random.shuffle(records)
        (self.data_buffer['sents'],
         self.data_buffer['words_per_sentence'],
         self.data_buffer['labels']) = zip(*records)
        t_end = time.time()
        print("[%s] data_load_time = %.2fs" % (nlp_format_dataloader.get_current_time(), (t_end - t_start)))

    def __len__(self):
        return self.num_records
| 3,641 | 33.685714 | 157 | py |
CorgiPile-PyTorch | CorgiPile-PyTorch-main/nlpformat/in_mem_block_only/block_only_dataset.py | """Load tfrecord files into torch datasets."""
import numpy as np
import random
import time
import os
import torch.utils.data
from nlpformat.loader import nlp_format_dataloader
from nlpformat.in_mem_block_only import block_only_iterator_utils
class InMemBlockOnlyDocDataset(torch.utils.data.IterableDataset):
    """In-memory document dataset with block-level shuffling only.

    Identical pipeline to the block+tuple variant except the tuple-level
    buffer comes from ``block_only_iterator_utils``.
    """

    def __init__(self,
                 data_folder: str,
                 split: str,
                 use_clustered_data: bool,
                 block_num: int,
                 buffer_size_ratio: float,
                 drop_last=False,
                 ) -> None:
        super(InMemBlockOnlyDocDataset, self).__init__()
        self.data_folder = data_folder
        self.use_clustered_data = use_clustered_data
        split = split.upper()
        assert split in {'TRAIN', 'TEST'}
        self.split = split
        self.data_buffer = []
        self.block_num = block_num
        self.__load_data__()
        self.buffer_size = int(self.num_records * buffer_size_ratio)
        self.drop_last = drop_last
        self.block_index_list = self.split_index_to_blocks()

    def __load_data__(self):
        """Load TRAIN/TEST tensors; optionally label-sort to emulate clustered storage."""
        print('[%s] Start loading data into memory' % nlp_format_dataloader.get_current_time())
        load_start_time = time.time()
        self.data = torch.load(os.path.join(self.data_folder, self.split + '_data.pth.tar'))
        self.num_records = len(self.data['labels'])
        if self.use_clustered_data:
            zipped = zip(self.data['docs'], self.data['sentences_per_document'],
                         self.data['words_per_sentence'], self.data['labels'])
            sort_zipped = sorted(zipped, key=lambda x: x[3])
            self.data['docs'], self.data['sentences_per_document'], self.data['words_per_sentence'], self.data['labels'] = zip(*sort_zipped)
        for i in range(0, self.num_records):
            self.data_buffer.append((self.data['docs'][i], self.data['sentences_per_document'][i], self.data['words_per_sentence'][i], self.data['labels'][i]))
        load_end_time = time.time()
        print("[%s] data_load_time = %.2fs" % (nlp_format_dataloader.get_current_time(), (load_end_time - load_start_time)))

    def split_index_to_blocks(self):
        """Partition [0, num_records) into ``block_num`` contiguous (start, end) ranges.

        Fix vs. original: the last block is extended to ``num_records`` so
        the ``num_records % block_num`` trailing records are no longer
        silently dropped from every epoch.
        """
        assert self.block_num < self.num_records
        block_tuple_num = int(self.num_records / self.block_num)
        print('[param] block_num = %d, block_tuple_num = %d' % (self.block_num, block_tuple_num))
        # store index_id instead of file_offset
        block_index_list = []
        for idx in range(0, self.block_num):
            start_index = block_tuple_num * idx
            end_index = block_tuple_num * (idx + 1)
            if idx == self.block_num - 1 or end_index > self.num_records:
                # Last block absorbs the remainder records.
                end_index = self.num_records
            if start_index < self.num_records:
                block_index_list.append((start_index, end_index))
        return block_index_list

    def __iter__(self):
        it = self.in_mem_block_iterator()
        it = block_only_iterator_utils.shuffle_iterator(it, self.buffer_size)
        it = map(self.transform_doc, it)
        return it

    def transform_doc(self, doc_item):
        """Convert a (doc, n_sents, words_per_sent, label) tuple into LongTensors."""
        return torch.LongTensor(doc_item[0]), \
               torch.LongTensor([doc_item[1]]), \
               torch.LongTensor(doc_item[2]), \
               torch.LongTensor([doc_item[3]])

    def in_mem_block_iterator(self):
        """Yield raw tuples with the block visiting order re-shuffled per epoch."""
        random.shuffle(self.block_index_list)
        for block_index in self.block_index_list:
            for data_index in range(block_index[0], block_index[1]):
                yield self.data_buffer[data_index]

    def __len__(self):
        return self.num_records
class InMemBlockOnlySentDataset(torch.utils.data.IterableDataset):
    """In-memory sentence dataset with block-level shuffling only.

    Identical pipeline to the block+tuple variant except the tuple-level
    buffer comes from ``block_only_iterator_utils``. (Fix vs. original:
    the last source line had extraction junk fused onto it, breaking the
    file's syntax.)
    """

    def __init__(self,
                 data_folder: str,
                 split: str,
                 use_clustered_data: bool,
                 block_num: int,
                 buffer_size_ratio: float,
                 drop_last=False,
                 ) -> None:
        super(InMemBlockOnlySentDataset, self).__init__()
        self.data_folder = data_folder
        self.use_clustered_data = use_clustered_data
        split = split.upper()
        assert split in {'TRAIN', 'TEST'}
        self.split = split
        self.data_buffer = []
        self.block_num = block_num
        self.__load_data__()
        self.buffer_size = int(self.num_records * buffer_size_ratio)
        self.drop_last = drop_last
        self.block_index_list = self.split_index_to_blocks()

    def __load_data__(self):
        """Load TRAIN/TEST tensors; optionally label-sort to emulate clustered storage."""
        print('[%s] Start loading data into memory' % nlp_format_dataloader.get_current_time())
        load_start_time = time.time()
        self.data = torch.load(os.path.join(self.data_folder, self.split + '_data.pth.tar'))
        self.num_records = len(self.data['labels'])
        if self.use_clustered_data:
            zipped = zip(self.data['sents'],
                         self.data['words_per_sentence'], self.data['labels'])
            sort_zipped = sorted(zipped, key=lambda x: x[2])
            self.data['sents'], self.data['words_per_sentence'], self.data['labels'] = zip(*sort_zipped)
        for i in range(0, self.num_records):
            self.data_buffer.append((self.data['sents'][i], self.data['words_per_sentence'][i], self.data['labels'][i]))
        load_end_time = time.time()
        print("[%s] data_load_time = %.2fs" % (nlp_format_dataloader.get_current_time(), (load_end_time - load_start_time)))

    def split_index_to_blocks(self):
        """Partition [0, num_records) into ``block_num`` contiguous (start, end) ranges.

        Fix vs. original: the last block is extended to ``num_records`` so
        the ``num_records % block_num`` trailing records are no longer
        silently dropped from every epoch.
        """
        assert self.block_num < self.num_records
        block_tuple_num = int(self.num_records / self.block_num)
        print('[param] block_num = %d, block_tuple_num = %d' % (self.block_num, block_tuple_num))
        # store index_id instead of file_offset
        block_index_list = []
        for idx in range(0, self.block_num):
            start_index = block_tuple_num * idx
            end_index = block_tuple_num * (idx + 1)
            if idx == self.block_num - 1 or end_index > self.num_records:
                # Last block absorbs the remainder records.
                end_index = self.num_records
            if start_index < self.num_records:
                block_index_list.append((start_index, end_index))
        return block_index_list

    def __iter__(self):
        it = self.in_mem_block_iterator()
        it = block_only_iterator_utils.shuffle_iterator(it, self.buffer_size)
        it = map(self.transform_sent, it)
        return it

    def transform_sent(self, doc_item):
        """Convert a (sentence, word-count, label) tuple into LongTensors."""
        return torch.LongTensor(doc_item[0]), \
               torch.LongTensor([doc_item[1]]), \
               torch.LongTensor([doc_item[2]])

    def in_mem_block_iterator(self):
        """Yield raw tuples with the block visiting order re-shuffled per epoch."""
        random.shuffle(self.block_index_list)
        for block_index in self.block_index_list:
            for data_index in range(block_index[0], block_index[1]):
                yield self.data_buffer[data_index]

    def __len__(self):
        return self.num_records
CorgiPile-PyTorch | CorgiPile-PyTorch-main/nlpformat/in_mem_no_shuffle/dataset.py | import numpy as np
import random
import time
import torch.utils.data
from nlpformat.loader import nlp_format_dataloader
"""
Load data from manually preprocessed data (see ``datasets/prepocess/``).
"""
import os
from typing import Tuple
import torch
from torch.utils.data import Dataset
class InMemNoShuffleDocDataset(torch.utils.data.Dataset):
    """
    Map-style dataset for document classification that reads the whole
    preprocessed split into memory and serves it in storage order (no
    shuffling).

    Parameters
    ----------
    data_folder : str
        Path to folder where data files are stored
    split : str
        Split, one of 'TRAIN' or 'TEST'
    """

    def __init__(self,
                 data_folder: str, split: str,
                 use_clustered_data: bool = True
                 ) -> None:
        super(InMemNoShuffleDocDataset, self).__init__()
        self.data_folder = data_folder
        self.use_clustered_data = use_clustered_data
        normalized = split.upper()
        assert normalized in {'TRAIN', 'TEST'}
        self.split = normalized
        self.data_buffer = []
        self.__load_data__()

    def __getitem__(self, idx):
        buf = self.data_buffer
        return (torch.LongTensor(buf['docs'][idx]),
                torch.LongTensor([buf['sentences_per_document'][idx]]),
                torch.LongTensor(buf['words_per_sentence'][idx]),
                torch.LongTensor([buf['labels'][idx]]))

    def __load_data__(self):
        """Load the split; optionally label-sort to emulate clustered storage."""
        print('[%s] Start loading data into memory' % nlp_format_dataloader.get_current_time())
        t_start = time.time()
        self.data_buffer = torch.load(os.path.join(self.data_folder, self.split + '_data.pth.tar'))
        self.num_records = len(self.data_buffer['labels'])
        if self.use_clustered_data:
            records = sorted(zip(self.data_buffer['docs'],
                                 self.data_buffer['sentences_per_document'],
                                 self.data_buffer['words_per_sentence'],
                                 self.data_buffer['labels']),
                             key=lambda rec: rec[3])
            (self.data_buffer['docs'],
             self.data_buffer['sentences_per_document'],
             self.data_buffer['words_per_sentence'],
             self.data_buffer['labels']) = zip(*records)
        t_end = time.time()
        print("[%s] data_load_time = %.2fs" % (nlp_format_dataloader.get_current_time(), (t_end - t_start)))

    def __len__(self):
        return self.num_records
class InMemNoShuffleSentDataset(torch.utils.data.Dataset):
    """
    A PyTorch Dataset class to be used in a PyTorch DataLoader to create batches
    (for sentence classification). Records are served in storage order (no
    shuffling). (Fix vs. original: the final ``return`` line had extraction
    junk fused onto it, breaking the file's syntax.)

    Parameters
    ----------
    data_folder : str
        Path to folder where data files are stored
    split : str
        Split, one of 'TRAIN' or 'TEST'
    """

    def __init__(self,
                 data_folder: str,
                 split: str,
                 use_clustered_data: bool = True
                 ) -> None:
        super(InMemNoShuffleSentDataset, self).__init__()
        self.data_folder = data_folder
        self.use_clustered_data = use_clustered_data
        split = split.upper()
        assert split in {'TRAIN', 'TEST'}
        self.split = split
        self.data_buffer = []
        self.__load_data__()

    def __getitem__(self, i: int) -> Tuple[torch.LongTensor, torch.LongTensor, torch.LongTensor]:
        return torch.LongTensor(self.data_buffer['sents'][i]), \
               torch.LongTensor([self.data_buffer['words_per_sentence'][i]]), \
               torch.LongTensor([self.data_buffer['labels'][i]])

    def __load_data__(self):
        """Load the split; optionally label-sort to emulate clustered storage."""
        print('[%s] Start loading data into memory' % nlp_format_dataloader.get_current_time())
        load_start_time = time.time()
        self.data_buffer = torch.load(os.path.join(self.data_folder, self.split + '_data.pth.tar'))
        self.num_records = len(self.data_buffer['labels'])
        if self.use_clustered_data:
            zipped = zip(self.data_buffer['sents'],
                         self.data_buffer['words_per_sentence'], self.data_buffer['labels'])
            sort_zipped = sorted(zipped, key=lambda x: x[2])
            result = zip(*sort_zipped)
            self.data_buffer['sents'], self.data_buffer['words_per_sentence'], self.data_buffer['labels'] = [list(x) for x in result]
        load_end_time = time.time()
        print("[%s] data_load_time = %.2fs" % (nlp_format_dataloader.get_current_time(), (load_end_time - load_start_time)))

    def __len__(self):
        return self.num_records
CorgiPile-PyTorch | CorgiPile-PyTorch-main/cifarformat/in_mem_sliding_window/dataset.py | import numpy as np
import warnings
import random
import time
import torch.utils.data
from cifarformat.loader import cifar_format_dataloader
class InMemSlidingWindowCifarDataset(torch.utils.data.IterableDataset):
    """CIFAR dataset held in memory and iterated with a sliding-window shuffle."""

    def __init__(self,
                 base_dir: str,
                 use_clustered_data: bool,
                 sliding_window_size_ratio: float,
                 train=True, transform=None, target_transform=None,
                 data_name='cifar10'
                 ) -> None:
        super(InMemSlidingWindowCifarDataset, self).__init__()
        self.base_dir = base_dir
        self.data_name = data_name
        self.train = train
        self.use_clustered_data = use_clustered_data
        self.transform = transform
        self.target_transform = target_transform
        self.data_buffer = []
        self.sliding_window_size_ratio = sliding_window_size_ratio
        self.__load_data__()
        self.num_records = len(self.data_buffer)
        self.shuffle_window_size = int(self.sliding_window_size_ratio * self.num_records)

    def __load_data__(self):
        """Load all (image, target) records into memory via the format reader."""
        print('[%s] Start loading data into memory' % cifar_format_dataloader.get_current_time())
        load_start_time = time.time()
        it = cifar_format_dataloader.reader_iterator(self.base_dir, self.train, self.use_clustered_data, self.data_name)
        self.data_buffer.extend(it)
        load_end_time = time.time()
        print("[%s] data_load_time = %.2fs" % (cifar_format_dataloader.get_current_time(), (load_end_time - load_start_time)))

    def transform_item(self, img_targe_item):
        """Apply the optional image / target transforms to one (img, target) pair."""
        img, target = img_targe_item
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return img, target

    def __iter__(self):
        it = self.in_mem_sliding_window_iterator()
        # Fix vs. original: the guard read ``self.transform_target``, an
        # attribute that never exists (the class sets ``target_transform``),
        # raising AttributeError whenever ``transform`` was None/falsy.
        if self.transform is not None or self.target_transform is not None:
            it = map(self.transform_item, it)
        return it

    def __len__(self):
        return self.num_records

    def in_mem_sliding_window_iterator(self):
        """Yield all records once, in sliding-window-shuffled order.

        Fix vs. original: the fill loop's ``except StopIteration`` was dead
        code (list indexing raises IndexError), and ``random.randint(0, -1)``
        raised ValueError when the window size rounded down to zero. A
        zero-size window now streams the records through unshuffled.
        """
        window = min(self.shuffle_window_size, self.num_records)
        if window <= 0:
            for i in range(self.num_records):
                yield self.data_buffer[i]
            return
        buffer = [self.data_buffer[i] for i in range(window)]
        for i in range(window, self.num_records):
            index = random.randint(0, window - 1)
            item = buffer[index]
            buffer[index] = self.data_buffer[i]
            yield item
        # Drain the window in random order.
        random.shuffle(buffer)
        for item in buffer:
            yield item
| 2,903 | 30.565217 | 126 | py |
CorgiPile-PyTorch | CorgiPile-PyTorch-main/cifarformat/in_mem_block/block_dataset.py | """Load tfrecord files into torch datasets."""
import typing
import numpy as np
import datetime
import random
import time
import torch.utils.data
from cifarformat.loader import cifar_format_dataloader
from cifarformat.in_mem_block import block_iterator_utils
class InMemBlockCifarDataset(torch.utils.data.IterableDataset):
    """CIFAR dataset held in memory with CorgiPile-style block+tuple shuffling."""

    def __init__(self,
                 base_dir: str,
                 use_clustered_data: bool,
                 block_num: int,
                 buffer_size_ratio: float,
                 drop_last=False,
                 train=True, transform=None, target_transform=None,
                 data_name='cifar10'
                 ) -> None:
        super(InMemBlockCifarDataset, self).__init__()
        self.data_buffer = []
        self.base_dir = base_dir
        self.block_num = block_num
        self.data_name = data_name
        self.use_clustered_data = use_clustered_data
        self.train = train
        self.transform = transform
        self.target_transform = target_transform
        self.__load_data__()
        self.num_records = len(self.data_buffer)
        self.buffer_size = int(self.num_records * buffer_size_ratio)
        self.drop_last = drop_last
        self.block_index_list = self.split_index_to_blocks()

    def split_index_to_blocks(self):
        """Partition [0, num_records) into ``block_num`` contiguous (start, end) ranges.

        Fix vs. original: the last block is extended to ``num_records`` so
        the ``num_records % block_num`` trailing records are no longer
        silently dropped from every epoch.
        """
        assert self.block_num < self.num_records
        block_tuple_num = int(self.num_records / self.block_num)
        print('[param] block_num = %d, block_tuple_num = %d' % (self.block_num, block_tuple_num))
        # store index_id instead of file_offset
        block_index_list = []
        for idx in range(0, self.block_num):
            start_index = block_tuple_num * idx
            end_index = block_tuple_num * (idx + 1)
            if idx == self.block_num - 1 or end_index > self.num_records:
                # Last block absorbs the remainder records.
                end_index = self.num_records
            if start_index < self.num_records:
                block_index_list.append((start_index, end_index))
        return block_index_list

    def __iter__(self):
        it = self.in_mem_block_iterator()
        it = block_iterator_utils.shuffle_iterator(it, self.buffer_size)
        # Fix vs. original: the guard read ``self.transform_target``, an
        # attribute that never exists (the class sets ``target_transform``),
        # raising AttributeError whenever ``transform`` was None/falsy.
        if self.transform is not None or self.target_transform is not None:
            it = map(self.transform_item, it)
        return it

    def transform_item(self, img_targe_item):
        """Apply the optional image / target transforms to one (img, target) pair."""
        img, target = img_targe_item
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return img, target

    def in_mem_block_iterator(self):
        """Yield raw records with the block visiting order re-shuffled per epoch."""
        random.shuffle(self.block_index_list)
        for block_index in self.block_index_list:
            for data_index in range(block_index[0], block_index[1]):
                yield self.data_buffer[data_index]

    def __load_data__(self):
        """Load all (image, target) records into memory via the format reader."""
        print('[%s] Start loading data into memory' % cifar_format_dataloader.get_current_time())
        load_start_time = time.time()
        it = cifar_format_dataloader.reader_iterator(self.base_dir, self.train, self.use_clustered_data, self.data_name)
        self.data_buffer.extend(it)
        load_end_time = time.time()
        print("[%s] data_load_time = %.2fs" % (cifar_format_dataloader.get_current_time(), (load_end_time - load_start_time)))

    def __len__(self):
        return self.num_records
| 3,726 | 32.276786 | 126 | py |
CorgiPile-PyTorch | CorgiPile-PyTorch-main/cifarformat/in_mem_bismarck/dataset.py | """Load tfrecord files into torch datasets."""
import numpy as np
import os
import pickle
import time
import torch.utils.data
from cifarformat.loader import cifar_format_dataloader
from cifarformat.in_mem_bismarck import iterator_utils
class InMemBismarckCifarDataset(torch.utils.data.IterableDataset):
    """Iterable CIFAR dataset emulating Bismarck's multiplexed reservoir
    sampling (MRS) shuffle: samples stream through an in-memory I/O buffer
    while an "old buffer", checkpointed to disk between epochs, contributes
    a configurable fraction of each epoch's output.
    """
    def __init__(self,
                 base_dir: str,
                 use_clustered_data: bool,
                 bismarck_buffer_size_ratio: float,
                 select_ratio_from_old_buffer: float,
                 old_buffer_checkpoint_dir: str,
                 train=True, transform=None, target_transform=None,
                 data_name='cifar10'
                 ) -> None:
        super(InMemBismarckCifarDataset, self).__init__()
        self.base_dir = base_dir
        self.data_name = data_name
        self.bismarck_buffer_size_ratio = bismarck_buffer_size_ratio
        self.select_ratio_from_old_buffer = select_ratio_from_old_buffer
        self.use_clustered_data = use_clustered_data
        self.train = train
        self.transform = transform
        self.target_transform = target_transform
        self.data_buffer = []
        self.__load_data__()
        self.total_records_num = len(self.data_buffer)
        #self.num_records_from_old_buffer = self.total_records_num * self.select_ratio_on_old_buffer
        # I/O buffer size is a fixed fraction of the dataset size.
        self.io_buffer_size = int(self.total_records_num * bismarck_buffer_size_ratio)
        assert(self.io_buffer_size > 0)
        # buffer in the memory worker in Bismarck's SIGMOD paper
        self.old_buffer = []
        self.io_buffer = []
        self.old_buffer_checkpoint_dir = old_buffer_checkpoint_dir
        if self.old_buffer_checkpoint_dir:
            # Start clean: discard checkpoints from earlier runs.
            self.delete_old_buffer()
    def __iter__(self):
        # Per-epoch pipeline: sequential scan -> MRS shuffle (backed by the
        # io_buffer and the on-disk old buffer) -> optional transforms.
        it = self.in_mem_bismarck_loader()
        id = 1
        # Order matters: read last epoch's old buffer BEFORE old_buffer_writer
        # reopens (and truncates) the same checkpoint file with mode 'wb'.
        self.load_old_buffer(id)
        file_writer = self.old_buffer_writer(id)
        it = iterator_utils.shuffle_iterator(it,
                                             self.io_buffer, self.io_buffer_size,
                                             self.total_records_num,
                                             self.old_buffer,
                                             self.select_ratio_from_old_buffer,
                                             file_writer)
        if self.transform or self.target_transform:
            it = map(self.transform_item, it)
        return it
    def load_old_buffer(self, id):
        # Restore the previous epoch's old buffer from its pickle checkpoint,
        # if one exists.
        # NOTE(review): with an empty old_buffer_checkpoint_dir this joins
        # against '' and resolves relative to the CWD — confirm callers
        # always pass a real directory (delete/makedirs assume one too).
        old_buffer_file = os.path.join(self.old_buffer_checkpoint_dir, '_old_buffer_' + str(id) +'.dat')
        if os.path.exists(old_buffer_file):
            file = open(old_buffer_file, 'rb')
            self.old_buffer = pickle.load(file)
            #self.io_buffer = pickle.load(file)
            file.close()
    def old_buffer_writer(self, id):
        # Open (truncating) the checkpoint file the shuffler writes the new
        # old buffer into; creates the checkpoint directory on first use.
        if not os.path.exists(self.old_buffer_checkpoint_dir):
            os.makedirs(self.old_buffer_checkpoint_dir)
        old_buffer_file = os.path.join(self.old_buffer_checkpoint_dir, '_old_buffer_' + str(id) +'.dat')
        file_writer = open(old_buffer_file, 'wb')
        return file_writer
    def delete_old_buffer(self):
        # Remove stale '_old_buffer_*.dat' checkpoints left by previous runs.
        if os.path.exists(self.old_buffer_checkpoint_dir):
            files = os.listdir(self.old_buffer_checkpoint_dir)
            for file in files:
                if file.startswith('_old_buffer_') and file.endswith('.dat'):
                    os.remove(os.path.join(self.old_buffer_checkpoint_dir, file))
    def transform_item(self, img_targe_item):
        # Apply the optional image/target transforms to a single sample.
        img = img_targe_item[0]
        target = img_targe_item[1]
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return img, target
    def __len__(self):
        return self.total_records_num
    def __load_data__(self):
        # Materialize the whole CIFAR split into self.data_buffer.
        print('[%s] Start loading data into memory' % cifar_format_dataloader.get_current_time())
        load_start_time = time.time()
        it = cifar_format_dataloader.reader_iterator(self.base_dir, self.train, self.use_clustered_data, self.data_name)
        self.data_buffer.extend(it)
        load_end_time = time.time()
        print("[%s] data_load_time = %.2fs" % (cifar_format_dataloader.get_current_time(), (load_end_time - load_start_time)))
    def in_mem_bismarck_loader(self):
        # Plain sequential scan over the in-memory buffer.
        for data_index in range(0, self.total_records_num):
            yield self.data_buffer[data_index]
CorgiPile-PyTorch | CorgiPile-PyTorch-main/cifarformat/in_mem_once_fully_shuffle/dataset.py | import numpy as np
import random
import time
import torch.utils.data
from cifarformat.loader import cifar_format_dataloader
class InMemOnceFullyShuffleCifarDataset(torch.utils.data.Dataset):
    """Map-style CIFAR dataset held entirely in memory and shuffled exactly
    once, at load time; every epoch then sees the same fixed order."""

    def __init__(self,
                 base_dir: str,
                 use_clustered_data: bool,
                 train=True, transform=None, target_transform=None,
                 data_name='cifar10'
                 ) -> None:
        super(InMemOnceFullyShuffleCifarDataset, self).__init__()
        self.base_dir = base_dir
        self.data_name = data_name
        self.use_clustered_data = use_clustered_data
        self.train = train
        self.transform = transform
        self.target_transform = target_transform
        self.data_buffer = []
        self.__load_data__()
        self.buffer_len = len(self.data_buffer)

    def transform_item(self, img_targe_item):
        """Apply the optional image/target transforms to one sample tuple."""
        img, target = img_targe_item[0], img_targe_item[1]
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return img, target

    def __getitem__(self, idx):
        return self.transform_item(self.data_buffer[idx])

    def __load_data__(self):
        """Read the whole split into memory, then shuffle it a single time."""
        print('[%s] Start loading data into memory' % cifar_format_dataloader.get_current_time())
        t_load_begin = time.time()
        samples = cifar_format_dataloader.reader_iterator(
            self.base_dir, self.train, self.use_clustered_data, self.data_name)
        self.data_buffer.extend(samples)
        t_load_done = time.time()
        print("[%s] data_load_time = %.2fs" % (cifar_format_dataloader.get_current_time(), (t_load_done - t_load_begin)))
        random.shuffle(self.data_buffer)
        t_shuffle_done = time.time()
        print("[%s] data_sort_time = %.2fs" % (cifar_format_dataloader.get_current_time(), (t_shuffle_done - t_load_done)))

    def __len__(self):
        return self.buffer_len
| 2,060 | 29.761194 | 126 | py |
CorgiPile-PyTorch | CorgiPile-PyTorch-main/cifarformat/in_mem_always_fully_shuffle/dataset.py | import numpy as np
import random
import time
import torch.utils.data
from cifarformat.loader import cifar_format_dataloader
class InMemAlwaysFullyShuffleCifarDataset(torch.utils.data.Dataset):
    """Map-style CIFAR dataset held fully in memory; the buffer is fully
    re-shuffled whenever index 0 is requested, i.e. once per epoch when a
    DataLoader with shuffle=False walks the indices sequentially."""

    def __init__(self,
                 base_dir: str,
                 use_clustered_data: bool,
                 train=True, transform=None, target_transform=None,
                 data_name='cifar10'
                 ) -> None:
        super(InMemAlwaysFullyShuffleCifarDataset, self).__init__()
        self.base_dir = base_dir
        self.data_name = data_name
        self.use_clustered_data = use_clustered_data
        self.train = train
        self.transform = transform
        self.target_transform = target_transform
        self.data_buffer = []
        self.__load_data__()
        self.buffer_len = len(self.data_buffer)

    def transform_item(self, img_targe_item):
        """Apply the optional image/target transforms to one sample tuple."""
        img, target = img_targe_item[0], img_targe_item[1]
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return img, target

    def __getitem__(self, idx):
        if idx == 0:
            # A new sequential pass is starting: reshuffle everything.
            random.shuffle(self.data_buffer)
        return self.transform_item(self.data_buffer[idx])

    def __load_data__(self):
        """Read the whole split into memory and do the initial shuffle."""
        print('[%s] Start loading data into memory' % cifar_format_dataloader.get_current_time())
        t_load_begin = time.time()
        samples = cifar_format_dataloader.reader_iterator(
            self.base_dir, self.train, self.use_clustered_data, self.data_name)
        self.data_buffer.extend(samples)
        t_load_done = time.time()
        print("[%s] data_load_time = %.2fs" % (cifar_format_dataloader.get_current_time(), (t_load_done - t_load_begin)))
        random.shuffle(self.data_buffer)
        t_shuffle_done = time.time()
        print("[%s] data_sort_time = %.2fs" % (cifar_format_dataloader.get_current_time(), (t_shuffle_done - t_load_done)))

    def __len__(self):
        return self.buffer_len
| 2,096 | 29.838235 | 126 | py |
CorgiPile-PyTorch | CorgiPile-PyTorch-main/cifarformat/loader/cifar_format_dataloader.py | """Reader utils"""
import os
import time
import numpy as np
import torchvision
import random
class MY_CIFAR10(torchvision.datasets.CIFAR10):
    """CIFAR10 wrapper that can present its samples sorted (clustered) by
    label, simulating pathologically ordered on-disk data."""

    def __init__(self, root, train=True, use_clustered_data=True):
        super(MY_CIFAR10, self).__init__(root, train=train, transform=None,
                                         target_transform=None, download=False)
        if use_clustered_data:
            # Stable sort of the sample order by label: ties keep their
            # original relative position, matching a key-on-label sort.
            order = sorted(range(len(self.targets)), key=lambda i: self.targets[i])
            self.data = [self.data[i] for i in order]
            self.targets = [self.targets[i] for i in order]
def reader_iterator(base_dir, train, use_clustered_data=True, data_name='cifar10'):
    """Yield (image, target) samples of the requested CIFAR split one by one.

    Only 'cifar10' is supported; any other ``data_name`` yields nothing.
    """
    if data_name == 'cifar10':
        dataset = MY_CIFAR10(root=base_dir, train=train, use_clustered_data=use_clustered_data)
        for idx in range(len(dataset)):
            yield dataset[idx]
def get_current_time():
    """Return the current local time formatted as 'YYYY-MM-DD HH:MM:SS'."""
    now = time.localtime()
    return time.strftime("%Y-%m-%d %H:%M:%S", now)
| 1,059 | 29.285714 | 96 | py |
CorgiPile-PyTorch | CorgiPile-PyTorch-main/cifarformat/in_mem_block_only/block_only_dataset.py | """Load tfrecord files into torch datasets."""
import typing
import numpy as np
import datetime
import random
import time
import torch.utils.data
from cifarformat.loader import cifar_format_dataloader
from cifarformat.in_mem_block_only import block_only_iterator_utils
class InMemBlockOnlyCifarDataset(torch.utils.data.IterableDataset):
    """Iterable CIFAR dataset that shuffles at block granularity only:
    the in-memory records are partitioned into ``block_num`` index ranges
    and each epoch visits the blocks in random order (tuple-level buffering
    is delegated to ``block_only_iterator_utils.shuffle_iterator``).
    """
    def __init__(self,
                 base_dir: str,
                 use_clustered_data: bool,
                 block_num: int,
                 buffer_size_ratio: float,
                 drop_last=False,
                 train=True, transform=None, target_transform=None,
                 data_name='cifar10'
                 ) -> None:
        super(InMemBlockOnlyCifarDataset, self).__init__()
        self.data_buffer = []
        self.base_dir = base_dir
        self.block_num = block_num
        self.data_name = data_name
        self.use_clustered_data = use_clustered_data
        self.train = train
        self.transform = transform
        self.target_transform = target_transform
        self.__load_data__()
        self.num_records = len(self.data_buffer)
        # Buffer size is a fixed fraction of the dataset.
        self.buffer_size = int(self.num_records * buffer_size_ratio)
        self.drop_last = drop_last
        self.block_index_list = self.split_index_to_blocks()
        # Block order is reshuffled at the start of every epoch inside
        # in_mem_block_iterator().

    def split_index_to_blocks(self):
        """Partition record indices into ``block_num`` (start, end) ranges.

        NOTE(review): ``block_tuple_num`` is floor(num_records/block_num), so
        when num_records is not divisible by block_num the trailing
        ``num_records % block_num`` records are never assigned to a block and
        are silently skipped; the ``end_index`` clamp cannot trigger.
        Preserved as-is to keep per-epoch sample counts unchanged.
        """
        assert(self.block_num < self.num_records)
        block_tuple_num = int(self.num_records / self.block_num)
        print ('[param] block_num = %d, block_tuple_num = %d' % (self.block_num, block_tuple_num) )
        # store index_id instead of file_offset
        block_index_list = []
        for idx in range(0, self.block_num):
            start_index = block_tuple_num * idx
            end_index = block_tuple_num * (idx + 1)
            if end_index > self.num_records:
                end_index = self.num_records
            if start_index < self.num_records:
                block_index_list.append((start_index, end_index))
        return block_index_list

    def __iter__(self):
        """Return a fresh per-epoch iterator of (img, target) samples."""
        it = self.in_mem_block_iterator()
        it = block_only_iterator_utils.shuffle_iterator(it, self.buffer_size)
        # Bug fix: original checked ``self.transform_target`` — an attribute
        # that is never set — raising AttributeError whenever ``transform``
        # was falsy but ``target_transform`` was supplied.
        if self.transform or self.target_transform:
            it = map(self.transform_item, it)
        return it

    def transform_item(self, img_targe_item):
        """Apply the optional image/target transforms to one sample."""
        img = img_targe_item[0]
        target = img_targe_item[1]
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return img, target

    def in_mem_block_iterator(self):
        """Yield records block by block, with the block order shuffled."""
        random.shuffle(self.block_index_list)
        # e.g. [(0, 71), (213, 284), (142, 213), (71, 142), (284, 355)]
        for block_index in self.block_index_list:
            for data_index in range(block_index[0], block_index[1]):
                yield self.data_buffer[data_index]

    def __load_data__(self):
        """Materialize the whole CIFAR split into ``self.data_buffer``."""
        print('[%s] Start loading data into memory' % cifar_format_dataloader.get_current_time())
        load_start_time = time.time()
        it = cifar_format_dataloader.reader_iterator(self.base_dir, self.train, self.use_clustered_data, self.data_name)
        self.data_buffer.extend(it)
        load_end_time = time.time()
        print("[%s] data_load_time = %.2fs" % (cifar_format_dataloader.get_current_time(), (load_end_time - load_start_time)))

    def __len__(self):
        return self.num_records
| 3,748 | 32.473214 | 126 | py |
CorgiPile-PyTorch | CorgiPile-PyTorch-main/cifarformat/in_mem_no_shuffle/dataset.py | import numpy as np
import random
import time
import torch.utils.data
from cifarformat.loader import cifar_format_dataloader
class InMemNoShuffleCifarDataset(torch.utils.data.Dataset):
    """Map-style CIFAR dataset that keeps samples in load order — the
    no-shuffle baseline for the shuffle-strategy benchmark."""

    def __init__(self,
                 base_dir: str,
                 use_clustered_data: bool,
                 train=True, transform=None, target_transform=None,
                 data_name='cifar10'
                 ) -> None:
        super(InMemNoShuffleCifarDataset, self).__init__()
        self.base_dir = base_dir
        self.data_name = data_name
        self.use_clustered_data = use_clustered_data
        self.train = train
        self.transform = transform
        self.target_transform = target_transform
        self.data_buffer = []
        self.__load_data__()
        self.buffer_len = len(self.data_buffer)

    def transform_item(self, img_targe_item):
        """Apply the optional image/target transforms to one sample tuple."""
        img, target = img_targe_item[0], img_targe_item[1]
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return img, target

    def __getitem__(self, idx):
        return self.transform_item(self.data_buffer[idx])

    def __load_data__(self):
        """Read the whole split into memory, preserving on-disk order."""
        print('[%s] Start loading data into memory' % cifar_format_dataloader.get_current_time())
        t_load_begin = time.time()
        samples = cifar_format_dataloader.reader_iterator(
            self.base_dir, self.train, self.use_clustered_data, self.data_name)
        self.data_buffer.extend(samples)
        t_load_done = time.time()
        print("[%s] data_load_time = %.2fs" % (cifar_format_dataloader.get_current_time(), (t_load_done - t_load_begin)))

    def __len__(self):
        return self.buffer_len
| 1,802 | 28.557377 | 126 | py |
CorgiPile-PyTorch | CorgiPile-PyTorch-main/cifar_dl_bench/cifar_dl_bench_train.py | '''Train CIFAR10 with PyTorch.'''
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torchvision
import torchvision.transforms as transforms
import os
import argparse
import sys
import time
import random
sys.path.append("../cifarformat")
sys.path.append(".")
from models import *
from cifarformat import in_mem_bismarck, in_mem_block, in_mem_block_only, in_mem_no_shuffle, in_mem_sliding_window, in_mem_once_fully_shuffle, in_mem_always_fully_shuffle
# Training
def train(epoch, net, trainloader, device, optimizer, criterion):
    """Run one epoch of training over ``trainloader``.

    Updates ``net``'s weights in place; the running loss/accuracy counters
    are tracked locally but not returned.
    """
    net.train()
    running_loss = 0
    n_correct = 0
    n_seen = 0
    for inputs, targets in trainloader:
        inputs = inputs.to(device)
        targets = targets.to(device)
        optimizer.zero_grad()
        logits = net(inputs)
        loss = criterion(logits, targets)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        preds = logits.argmax(dim=1)
        n_seen += targets.size(0)
        n_correct += (preds == targets).sum().item()
def test(epoch, net, testloader, device, criterion):
    """Evaluate ``net`` on ``testloader`` without gradients.

    Returns a tuple ``(accuracy_percent, summed_batch_loss)``.
    """
    net.eval()
    loss_sum = 0
    n_correct = 0
    n_seen = 0
    with torch.no_grad():
        for inputs, targets in testloader:
            inputs = inputs.to(device)
            targets = targets.to(device)
            logits = net(inputs)
            loss_sum += criterion(logits, targets).item()
            preds = logits.argmax(dim=1)
            n_seen += targets.size(0)
            n_correct += (preds == targets).sum().item()
    # Save checkpoint.
    acc = 100.*n_correct/n_seen
    return (acc, loss_sum)
def main_worker(args):
    """Train and evaluate one benchmark configuration end to end.

    ``args`` is a plain dict of hyper-parameters and paths (built in
    ``main``); ``args['shuffle_mode']`` selects which in-memory dataset
    variant supplies the training data. Progress and timing lines are
    appended to ``args['log_file']``.
    """
    log_file = args['log_file']
    writer = open(log_file, 'w')
    # Echo every parameter at the top of the log for reproducibility.
    for k in args:
        writer.write("[params] " + str(k) + " = " + str(args[k]) + '\n')
    writer.flush()
    writer.write('[%s] Start iteration' % get_current_time())
    writer.write('\n')
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    best_acc = 0  # best test accuracy
    start_epoch = 0  # start from epoch 0 or last checkpoint epoch
    data_dir = args['data_dir']
    download = args['download']
    num_workers = args['num_workers']
    iter_num = args['iter_num']
    learning_rate = args['learning_rate']
    saving = args['saving']
    shuffle_mode = args['shuffle_mode']
    model_name = args['model_name']
    batch_size = args['batch_size']
    use_train_accuracy = args['use_train_accuracy']
    use_sgd = args['use_sgd']
    # Data
    print('==> Preparing data..')
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    sliding_window_size_ratio = args['sliding_window_size_ratio']
    bismarck_buffer_size_ratio = args['bismarck_buffer_size_ratio']
    if (shuffle_mode == 'bismarck_mrs'):
        select_ratio_from_old_buffer = args['select_ratio_from_old_buffer']
    block_num = args['block_num']
    buffer_size_ratio = args['buffer_size_ratio']
    use_clustered_data = args['use_clustered_data']
    # Instantiate the training dataset for the selected shuffle strategy.
    if (shuffle_mode == 'once_fully'):
        trainset = in_mem_once_fully_shuffle.InMemOnceFullyShuffleCifarDataset(
            base_dir=data_dir,
            use_clustered_data=use_clustered_data,
            train=True, transform=transform_train)
    elif (shuffle_mode == 'no_shuffle'):
        trainset = in_mem_no_shuffle.InMemNoShuffleCifarDataset(
            base_dir=data_dir,
            use_clustered_data=use_clustered_data,
            train=True, transform=transform_train)
    elif (shuffle_mode == 'always_fully'):
        trainset = in_mem_always_fully_shuffle.InMemAlwaysFullyShuffleCifarDataset(
            base_dir=data_dir,
            use_clustered_data=use_clustered_data,
            train=True, transform=transform_train)
    elif (shuffle_mode == 'bismarck_mrs'):
        old_buffer_checkpoint_dir = args['old_buffer_checkpoint_dir']
        trainset = in_mem_bismarck.InMemBismarckCifarDataset(
            base_dir=data_dir,
            use_clustered_data=use_clustered_data,
            bismarck_buffer_size_ratio=bismarck_buffer_size_ratio,
            select_ratio_from_old_buffer=select_ratio_from_old_buffer,
            old_buffer_checkpoint_dir=old_buffer_checkpoint_dir,
            train=True, transform=transform_train)
    elif (shuffle_mode == 'block'):
        trainset = in_mem_block.InMemBlockCifarDataset(
            base_dir=data_dir,
            use_clustered_data=use_clustered_data,
            block_num=block_num,
            buffer_size_ratio=buffer_size_ratio,
            drop_last=False,
            train=True, transform=transform_train)
    elif (shuffle_mode == 'block_only'):
        trainset = in_mem_block_only.InMemBlockOnlyCifarDataset(
            base_dir=data_dir,
            use_clustered_data=use_clustered_data,
            block_num=block_num,
            buffer_size_ratio=buffer_size_ratio,
            drop_last=False,
            train=True, transform=transform_train)
    elif (shuffle_mode == 'sliding_window'):
        trainset = in_mem_sliding_window.InMemSlidingWindowCifarDataset(
            base_dir=data_dir,
            use_clustered_data=use_clustered_data,
            sliding_window_size_ratio=sliding_window_size_ratio,
            train=True, transform=transform_train)
    else:
        # Fallback (e.g. 'once_shuffle'): plain torchvision CIFAR10.
        trainset = torchvision.datasets.CIFAR10(
            root=data_dir, train=True, download=download, transform=transform_train)
    # shuffle=False: the dataset variants above implement their own shuffling.
    trainloader = torch.utils.data.DataLoader(
        trainset, batch_size=batch_size, shuffle=False, num_workers=num_workers)
    if (use_train_accuracy):
        testset = torchvision.datasets.CIFAR10(
            root=data_dir, train=True, download=download, transform=transform_train)
    else:
        testset = torchvision.datasets.CIFAR10(
            root=data_dir, train=False, download=download, transform=transform_test)
    testloader = torch.utils.data.DataLoader(
        testset, batch_size=batch_size, shuffle=False, num_workers=num_workers)
    classes = ('plane', 'car', 'bird', 'cat', 'deer',
               'dog', 'frog', 'horse', 'ship', 'truck')
    # Model
    writer.write('==> Building model..\n')
    if (model_name == 'ResNet18'):
        net = ResNet18()
    elif (model_name == 'VGG19'):
        net = VGG('VGG19')
    net = net.to(device)
    if device == 'cuda':
        net = torch.nn.DataParallel(net)
        cudnn.benchmark = True
    if args['resume']:
        # Load checkpoint.
        print('==> Resuming from checkpoint..')
        assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'
        checkpoint = torch.load('./checkpoint/ckpt.pth')
        net.load_state_dict(checkpoint['net'])
        best_acc = checkpoint['acc']
        start_epoch = checkpoint['epoch']
    criterion = nn.CrossEntropyLoss()
    if (use_sgd):
        optimizer = optim.SGD(net.parameters(), lr=learning_rate,
                              momentum=0.9, weight_decay=5e-4)
    else:
        optimizer = optim.Adam(net.parameters(), lr=learning_rate,
                               weight_decay=5e-4)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=iter_num)
    # Timing accumulators: exec = full epoch, grad = training pass,
    # loss = evaluation pass.
    avg_exec_t = 0.0
    avg_grad_t = 0.0
    avg_loss_t = 0.0
    first_exec_t = 0.0
    first_grad_t = 0.0
    first_loss_t = 0.0
    second_exec_t = 0.0
    second_grad_t = 0.0
    second_loss_t = 0.0
    max_accuracy = 0.0
    print('[%s] Start training' % get_current_time())
    for epoch in range(start_epoch, start_epoch + iter_num):
        start = time.time()
        train(epoch, net, trainloader, device, optimizer, criterion)
        grad_end = time.time()
        (acc, test_loss) = test(epoch, net, testloader, device, criterion)
        loss_end = time.time()
        exec_t = loss_end - start
        grad_t = grad_end - start
        loss_t = exec_t - grad_t
        if saving == True and acc > best_acc:
            print('Saving..')
            state = {
                'net': net.state_dict(),
                'acc': acc,
                'epoch': epoch,
            }
            if not os.path.isdir('checkpoint'):
                os.mkdir('checkpoint')
            torch.save(state, './checkpoint/ckpt.pth')
            best_acc = acc
        scheduler.step()
        i = epoch
        avg_exec_t += exec_t
        avg_grad_t += grad_t
        avg_loss_t += loss_t
        # Remember the first two epochs so warm-up cost can be excluded
        # from the averaged timings reported below.
        if i == 0:
            first_exec_t = exec_t
            first_grad_t = grad_t
            first_loss_t = loss_t
        elif i == 1:
            second_exec_t = exec_t
            second_grad_t = grad_t
            second_loss_t = loss_t
        writer.write('[%s] [Epoch %2d] Loss = %.2f, acc = %.2f, exec_t = %.2fs, grad_t = %.2fs, loss_t = %.2fs' %
                     (get_current_time(), i + 1, test_loss, acc, round(exec_t, 2),
                      round(grad_t, 2), round(loss_t, 2)))
        writer.write('\n')
        writer.flush()
        if acc > max_accuracy:
            max_accuracy = acc
    writer.write('[%s] [Finish] avg_exec_t = %.2fs, avg_grad_t = %.2fs, avg_loss_t = %.2fs' %
                 (get_current_time(), avg_exec_t / iter_num,
                  avg_grad_t / iter_num, avg_loss_t / iter_num))
    writer.write('\n')
    if iter_num > 2:
        # Re-report averages with the first (and then the first two)
        # warm-up epochs removed.
        avg_exec_t -= first_exec_t
        avg_grad_t -= first_grad_t
        avg_loss_t -= first_loss_t
        writer.write('[%s] [-first] avg_exec_t = %.2fs, avg_grad_t = %.2fs, avg_loss_t = %.2fs' %
                     (get_current_time(), avg_exec_t / (iter_num - 1),
                      avg_grad_t / (iter_num - 1), avg_loss_t / (iter_num - 1)))
        writer.write('\n')
        avg_exec_t -= second_exec_t
        avg_grad_t -= second_grad_t
        avg_loss_t -= second_loss_t
        writer.write('[%s] [-1 & 2] avg_exec_t = %.2fs, avg_grad_t = %.2fs, avg_loss_t = %.2fs' %
                     (get_current_time(), avg_exec_t / (iter_num - 2),
                      avg_grad_t / (iter_num - 2), avg_loss_t / (iter_num - 2)))
        writer.write('\n')
    writer.write('[%s] [MaxAcc] max_accuracy = %.2f' %
                 (get_current_time(), max_accuracy))
    writer.write('\n')
def get_current_time():
    """Return the current local time formatted as 'YYYY-MM-DD HH:MM:SS'."""
    now = time.localtime()
    return time.strftime("%Y-%m-%d %H:%M:%S", now)
def get_current_time_filename():
    """Return local time as a filename-safe string: 'YYYY-MM-DD-HH-MM-SS'."""
    now = time.localtime()
    return time.strftime("%Y-%m-%d-%H-%M-%S", now)
def main():
    """Build the experiment grid (paths, hyper-parameters, shuffle modes)
    and launch ``main_worker`` once per configuration.

    NOTE: paths and the CUDA device index are hard-coded for a specific
    cluster environment.
    """
    log_base_dir = '/mnt/ds3lab-scratch/xuliji/code/CorgiPile-PyTorch'
    data_dir = '/mnt/ds3lab-scratch/xuliji/data/'
    log_dir = 'train_log_cifar10_sgd'
    model_name = 'ResNet18'
    #model_name = 'VGG19'
    data_name = 'cifar10'
    use_clustered_data = True
    use_train_accuracy = True # If False, it will compute and output test accuracy instead of train accuracy
    use_sgd = True # If false, it will use Adam instead of SGD
    batch_size = 128
    iter_num = 10
    num_workers = 1
    lr_decay = 0.95
    shuffle_modes = ['once_shuffle']
    #shuffle_modes = ['once_shuffle', 'block', 'block', 'sliding_window', 'bismarck_mrs', 'no_shuffle', 'block_only']
    #shuffle_modes = ['block', 'sliding_window', 'bismarck_mrs', 'no_shuffle', 'block_only']
    #shuffle_modes = ['block']
    os.environ['CUDA_VISIBLE_DEVICES'] = '1'
    n_records = 0
    block_num = 500
    if (model_name == 'ResNet18'):
        learning_rate = 0.001
    elif (model_name == 'VGG19'):
        learning_rate = 0.001
    # Pack everything into the args dict consumed by main_worker.
    args = {}
    args['use_clustered_data'] = use_clustered_data
    args['use_train_accuracy'] = use_train_accuracy
    args['use_sgd'] = use_sgd
    args['model_name'] = model_name
    args['batch_size'] = batch_size
    args['iter_num'] = iter_num
    args['n_records'] = n_records
    args['learning_rate'] = learning_rate
    args['num_workers'] = num_workers
    args['data_name'] = data_name
    args['lr_decay'] = lr_decay
    args['resume'] = False
    args['data_dir'] = data_dir
    args['download'] = False
    args['saving'] = False
    # for our-block-based sgd
    buffer_size_ratio = 0.1
    # for sliding_window
    sliding_window_size_ratio = 0.1
    # for bismarck_mrs
    bismarck_buffer_size_ratio = 0.1
    select_ratio_from_old_buffers = [0.4, 0.5]
    args['block_num'] = block_num
    args['buffer_size_ratio'] = buffer_size_ratio
    args['sliding_window_size_ratio'] = sliding_window_size_ratio
    args['bismarck_buffer_size_ratio'] = bismarck_buffer_size_ratio
    # Unique checkpoint dir per run (timestamp + random suffix).
    args['old_buffer_checkpoint_dir'] = log_base_dir + '/checkpoint/' + get_current_time_filename() + str(random.randint(1,100))
    for shuffle_mode in shuffle_modes:
        args['shuffle_mode'] = shuffle_mode
        if (shuffle_mode == 'bismarck_mrs'):
            # Bismarck additionally sweeps over old-buffer selection ratios.
            for ratio in select_ratio_from_old_buffers:
                args['select_ratio_from_old_buffer'] = ratio
                log_txt = shuffle_mode + '_' + data_name + '_lr' + str(learning_rate) + '_ratio_' + str(ratio) + '_' + get_current_time_filename() + '.txt'
                outdir = os.path.join(log_base_dir, log_dir, data_name, model_name, 'sgd-bs' + str(batch_size), shuffle_mode)
                log_file = os.path.join(outdir, log_txt)
                args['log_file'] = log_file
                if not os.path.exists(outdir):
                    os.makedirs(outdir)
                main_worker(args)
        else:
            log_txt = shuffle_mode + '_' + data_name + '_lr' + str(learning_rate) + '_' + get_current_time_filename() + '.txt'
            outdir = os.path.join(log_base_dir, log_dir, data_name, model_name, 'sgd-bs' + str(batch_size), shuffle_mode)
            log_file = os.path.join(outdir, log_txt)
            args['log_file'] = log_file
            if not os.path.exists(outdir):
                os.makedirs(outdir)
            main_worker(args)


if __name__ == '__main__':
    main()
| 14,325 | 30.906459 | 170 | py |
CorgiPile-PyTorch | CorgiPile-PyTorch-main/cifar_dl_bench/models/dla.py | '''DLA in PyTorch.
Reference:
Deep Layer Aggregation. https://arxiv.org/abs/1707.06484
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
    """Two-conv residual block with an identity or 1x1-projection shortcut."""
    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # Project the shortcut only when shape changes (stride or width).
        needs_projection = stride != 1 or in_planes != self.expansion * planes
        if needs_projection:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes)
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        residual = self.shortcut(x)
        y = F.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        return F.relu(y + residual)
class Root(nn.Module):
    """DLA aggregation node: concatenate feature maps along the channel
    axis, then apply conv -> BN -> ReLU."""

    def __init__(self, in_channels, out_channels, kernel_size=1):
        super(Root, self).__init__()
        pad = (kernel_size - 1) // 2  # 'same' padding for odd kernels
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,
                              stride=1, padding=pad, bias=False)
        self.bn = nn.BatchNorm2d(out_channels)

    def forward(self, xs):
        merged = torch.cat(xs, 1)
        return F.relu(self.bn(self.conv(merged)))
class Tree(nn.Module):
    """Recursive aggregation tree used by DLA.

    A level-1 tree is two blocks whose outputs are merged by a Root; a
    level-k tree additionally nests level-(k-1)..level-1 subtrees
    (registered dynamically as attributes 'level_1'..'level_{k-1}') whose
    intermediate outputs all feed the Root.
    """
    def __init__(self, block, in_channels, out_channels, level=1, stride=1):
        super(Tree, self).__init__()
        self.level = level
        if level == 1:
            # Leaf tree: root merges the outputs of exactly two blocks.
            self.root = Root(2*out_channels, out_channels)
            self.left_node = block(in_channels, out_channels, stride=stride)
            self.right_node = block(out_channels, out_channels, stride=1)
        else:
            # Root consumes prev_root's output, every subtree's output, and
            # the two leaf blocks -> (level+2) * out_channels input channels.
            self.root = Root((level+2)*out_channels, out_channels)
            for i in reversed(range(1, level)):
                subtree = Tree(block, in_channels, out_channels,
                               level=i, stride=stride)
                self.__setattr__('level_%d' % i, subtree)
            self.prev_root = block(in_channels, out_channels, stride=stride)
            self.left_node = block(out_channels, out_channels, stride=1)
            self.right_node = block(out_channels, out_channels, stride=1)
    def forward(self, x):
        # Collect every intermediate feature map that feeds the root;
        # each stage consumes the previous stage's output.
        xs = [self.prev_root(x)] if self.level > 1 else []
        for i in reversed(range(1, self.level)):
            level_i = self.__getattr__('level_%d' % i)
            x = level_i(x)
            xs.append(x)
        x = self.left_node(x)
        xs.append(x)
        x = self.right_node(x)
        xs.append(x)
        out = self.root(xs)
        return out
class DLA(nn.Module):
    """Deep Layer Aggregation backbone sized for 32x32 (CIFAR) inputs."""

    def __init__(self, block=BasicBlock, num_classes=10):
        super(DLA, self).__init__()

        def conv_bn_relu(c_in, c_out):
            # 3x3 conv -> BN -> ReLU, stride 1 (stem-style stage).
            return nn.Sequential(
                nn.Conv2d(c_in, c_out, kernel_size=3, stride=1, padding=1, bias=False),
                nn.BatchNorm2d(c_out),
                nn.ReLU(True)
            )

        self.base = conv_bn_relu(3, 16)
        self.layer1 = conv_bn_relu(16, 16)
        self.layer2 = conv_bn_relu(16, 32)
        self.layer3 = Tree(block, 32, 64, level=1, stride=1)
        self.layer4 = Tree(block, 64, 128, level=2, stride=2)
        self.layer5 = Tree(block, 128, 256, level=2, stride=2)
        self.layer6 = Tree(block, 256, 512, level=1, stride=2)
        self.linear = nn.Linear(512, num_classes)

    def forward(self, x):
        stages = (self.base, self.layer1, self.layer2, self.layer3,
                  self.layer4, self.layer5, self.layer6)
        for stage in stages:
            x = stage(x)
        # Global 4x4 average pool, flatten, classify.
        x = F.avg_pool2d(x, 4)
        x = x.view(x.size(0), -1)
        return self.linear(x)
def test():
    """Smoke test: build a DLA net and push one random 32x32 image through."""
    model = DLA()
    print(model)
    sample = torch.randn(1, 3, 32, 32)
    print(model(sample).size())


if __name__ == '__main__':
    test()
| 4,425 | 31.544118 | 83 | py |
CorgiPile-PyTorch | CorgiPile-PyTorch-main/cifar_dl_bench/models/shufflenetv2.py | '''ShuffleNetV2 in PyTorch.
See the paper "ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design" for more details.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class ShuffleBlock(nn.Module):
    """Interleave channels across ``groups`` groups (ShuffleNet channel
    shuffle): [N,C,H,W] -> [N,g,C/g,H,W] -> swap group/channel axes -> [N,C,H,W]."""

    def __init__(self, groups=2):
        super(ShuffleBlock, self).__init__()
        self.groups = groups

    def forward(self, x):
        n, c, h, w = x.size()
        per_group = c // self.groups
        grouped = x.view(n, self.groups, per_group, h, w)
        # transpose(1, 2) is equivalent to permute(0, 2, 1, 3, 4) here.
        return grouped.transpose(1, 2).reshape(n, c, h, w)
class SplitBlock(nn.Module):
    """Split a tensor along the channel axis at ``ratio`` of its channels,
    returning the (first, remainder) channel views."""

    def __init__(self, ratio):
        super(SplitBlock, self).__init__()
        self.ratio = ratio

    def forward(self, x):
        split_at = int(x.size(1) * self.ratio)
        return (x.narrow(1, 0, split_at),
                x.narrow(1, split_at, x.size(1) - split_at))
class BasicBlock(nn.Module):
    """ShuffleNetV2 basic unit: split channels, transform one half through a
    1x1 -> 3x3 depthwise -> 1x1 stack, concatenate with the untouched half,
    then channel-shuffle the result."""

    def __init__(self, in_channels, split_ratio=0.5):
        super(BasicBlock, self).__init__()
        self.split = SplitBlock(split_ratio)
        in_channels = int(in_channels * split_ratio)
        self.conv1 = nn.Conv2d(in_channels, in_channels,
                               kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(in_channels)
        # Depthwise 3x3 (groups == channels).
        self.conv2 = nn.Conv2d(in_channels, in_channels,
                               kernel_size=3, stride=1, padding=1, groups=in_channels, bias=False)
        self.bn2 = nn.BatchNorm2d(in_channels)
        self.conv3 = nn.Conv2d(in_channels, in_channels,
                               kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(in_channels)
        self.shuffle = ShuffleBlock()

    def forward(self, x):
        passthrough, branch = self.split(x)
        y = F.relu(self.bn1(self.conv1(branch)))
        y = self.bn2(self.conv2(y))
        y = F.relu(self.bn3(self.conv3(y)))
        merged = torch.cat([passthrough, y], 1)
        return self.shuffle(merged)
class DownBlock(nn.Module):
    """ShuffleNetV2 downsampling unit: both branches stride by 2, each
    producing half of ``out_channels``; their concatenation is shuffled."""

    def __init__(self, in_channels, out_channels):
        super(DownBlock, self).__init__()
        mid_channels = out_channels // 2
        # left branch: depthwise 3x3 stride-2, then pointwise 1x1
        self.conv1 = nn.Conv2d(in_channels, in_channels,
                               kernel_size=3, stride=2, padding=1, groups=in_channels, bias=False)
        self.bn1 = nn.BatchNorm2d(in_channels)
        self.conv2 = nn.Conv2d(in_channels, mid_channels,
                               kernel_size=1, bias=False)
        self.bn2 = nn.BatchNorm2d(mid_channels)
        # right branch: 1x1, depthwise 3x3 stride-2, 1x1
        self.conv3 = nn.Conv2d(in_channels, mid_channels,
                               kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(mid_channels)
        self.conv4 = nn.Conv2d(mid_channels, mid_channels,
                               kernel_size=3, stride=2, padding=1, groups=mid_channels, bias=False)
        self.bn4 = nn.BatchNorm2d(mid_channels)
        self.conv5 = nn.Conv2d(mid_channels, mid_channels,
                               kernel_size=1, bias=False)
        self.bn5 = nn.BatchNorm2d(mid_channels)
        self.shuffle = ShuffleBlock()

    def forward(self, x):
        left = F.relu(self.bn2(self.conv2(self.bn1(self.conv1(x)))))
        right = F.relu(self.bn3(self.conv3(x)))
        right = self.bn4(self.conv4(right))
        right = F.relu(self.bn5(self.conv5(right)))
        merged = torch.cat([left, right], 1)
        return self.shuffle(merged)
class ShuffleNetV2(nn.Module):
    """ShuffleNetV2 classifier for CIFAR-sized 32x32 inputs; ``net_size``
    selects a width configuration from the module-level ``configs`` table."""

    def __init__(self, net_size):
        super(ShuffleNetV2, self).__init__()
        cfg = configs[net_size]
        out_channels = cfg['out_channels']
        num_blocks = cfg['num_blocks']

        self.conv1 = nn.Conv2d(3, 24, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(24)
        self.in_channels = 24
        self.layer1 = self._make_layer(out_channels[0], num_blocks[0])
        self.layer2 = self._make_layer(out_channels[1], num_blocks[1])
        self.layer3 = self._make_layer(out_channels[2], num_blocks[2])
        self.conv2 = nn.Conv2d(out_channels[2], out_channels[3],
                               kernel_size=1, stride=1, padding=0, bias=False)
        self.bn2 = nn.BatchNorm2d(out_channels[3])
        self.linear = nn.Linear(out_channels[3], 10)

    def _make_layer(self, out_channels, num_blocks):
        """One stage: a stride-2 DownBlock then ``num_blocks`` BasicBlocks."""
        stage = [DownBlock(self.in_channels, out_channels)]
        for _ in range(num_blocks):
            stage.append(BasicBlock(out_channels))
        self.in_channels = out_channels
        return nn.Sequential(*stage)

    def forward(self, x):
        y = F.relu(self.bn1(self.conv1(x)))
        # (The ImageNet variant max-pools here; skipped for 32x32 inputs.)
        for stage in (self.layer1, self.layer2, self.layer3):
            y = stage(y)
        y = F.relu(self.bn2(self.conv2(y)))
        y = F.avg_pool2d(y, 4)
        y = y.view(y.size(0), -1)
        return self.linear(y)
# ShuffleNetV2 width-multiplier configurations (Ma et al., ECCV 2018).
# Keys are the `net_size` multiplier accepted by ShuffleNetV2; values give the
# per-stage output channels (three stages + final 1x1 conv) and block counts.
configs = {
    0.5: {
        'out_channels': (48, 96, 192, 1024),
        'num_blocks': (3, 7, 3)
    },
    1: {
        'out_channels': (116, 232, 464, 1024),
        'num_blocks': (3, 7, 3)
    },
    1.5: {
        'out_channels': (176, 352, 704, 1024),
        'num_blocks': (3, 7, 3)
    },
    2: {
        # Fixed: the paper specifies 244 (not 224) for the 2x model's first stage.
        'out_channels': (244, 488, 976, 2048),
        'num_blocks': (3, 7, 3)
    }
}
def test():
    """Smoke-test: push a random batch through the 0.5x model, print logits shape."""
    model = ShuffleNetV2(net_size=0.5)
    batch = torch.randn(3, 3, 32, 32)
    logits = model(batch)
    print(logits.shape)
# test()
| 5,530 | 32.932515 | 107 | py |
CorgiPile-PyTorch | CorgiPile-PyTorch-main/cifar_dl_bench/models/regnet.py | '''RegNet in PyTorch.
Paper: "Designing Network Design Spaces".
Reference: https://github.com/keras-team/keras-applications/blob/master/keras_applications/efficientnet.py
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class SE(nn.Module):
    '''Squeeze-and-Excitation block.'''

    def __init__(self, in_planes, se_planes):
        super(SE, self).__init__()
        self.se1 = nn.Conv2d(in_planes, se_planes, kernel_size=1, bias=True)
        self.se2 = nn.Conv2d(se_planes, in_planes, kernel_size=1, bias=True)

    def forward(self, x):
        # Squeeze: global average pool to 1x1; excite: bottleneck MLP (as 1x1
        # convs) with a sigmoid gate; then rescale the input channel-wise.
        squeezed = F.adaptive_avg_pool2d(x, (1, 1))
        gate = self.se2(F.relu(self.se1(squeezed))).sigmoid()
        return x * gate
class Block(nn.Module):
    """RegNet X/Y block: 1x1 reduce, grouped 3x3, optional SE, 1x1 expand,
    with a projection shortcut when the shape changes."""

    def __init__(self, w_in, w_out, stride, group_width, bottleneck_ratio, se_ratio):
        super(Block, self).__init__()
        # Bottleneck width (== w_out when bottleneck_ratio is 1).
        w_b = int(round(w_out * bottleneck_ratio))
        self.conv1 = nn.Conv2d(w_in, w_b, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(w_b)
        # Grouped 3x3 conv; w_b must be divisible by group_width.
        num_groups = w_b // group_width
        self.conv2 = nn.Conv2d(w_b, w_b, kernel_size=3,
                               stride=stride, padding=1, groups=num_groups, bias=False)
        self.bn2 = nn.BatchNorm2d(w_b)
        # Squeeze-excitation only for RegNetY (se_ratio > 0).
        self.with_se = se_ratio > 0
        if self.with_se:
            w_se = int(round(w_in * se_ratio))
            self.se = SE(w_b, w_se)
        self.conv3 = nn.Conv2d(w_b, w_out, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(w_out)

        self.shortcut = nn.Sequential()
        if stride != 1 or w_in != w_out:
            self.shortcut = nn.Sequential(
                nn.Conv2d(w_in, w_out,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(w_out)
            )

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        if self.with_se:
            out = self.se(out)
        out = self.bn3(self.conv3(out))
        residual = self.shortcut(x)
        return F.relu(out + residual)
class RegNet(nn.Module):
    """RegNet backbone driven by a config dict.

    ``cfg`` must provide 'depths', 'widths', 'strides' (4-element sequences)
    plus scalar 'group_width', 'bottleneck_ratio' and 'se_ratio'.
    """
    def __init__(self, cfg, num_classes=10):
        super(RegNet, self).__init__()
        self.cfg = cfg
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(0)
        self.layer2 = self._make_layer(1)
        self.layer3 = self._make_layer(2)
        self.layer4 = self._make_layer(3)
        self.linear = nn.Linear(self.cfg['widths'][-1], num_classes)
    def _make_layer(self, idx):
        """Build stage ``idx``; only its first block uses the configured stride.

        Mutates self.in_planes, so stages must be constructed in order.
        """
        depth = self.cfg['depths'][idx]
        width = self.cfg['widths'][idx]
        stride = self.cfg['strides'][idx]
        group_width = self.cfg['group_width']
        bottleneck_ratio = self.cfg['bottleneck_ratio']
        se_ratio = self.cfg['se_ratio']
        layers = []
        for i in range(depth):
            s = stride if i == 0 else 1
            layers.append(Block(self.in_planes, width,
                                s, group_width, bottleneck_ratio, se_ratio))
            self.in_planes = width
        return nn.Sequential(*layers)
    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        # Global average pool, so any spatial input size works.
        out = F.adaptive_avg_pool2d(out, (1, 1))
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
def RegNetX_200MF():
    """RegNetX-200MF: no squeeze-excitation (se_ratio = 0)."""
    return RegNet({
        'depths': [1, 1, 4, 7],
        'widths': [24, 56, 152, 368],
        'strides': [1, 1, 2, 2],
        'group_width': 8,
        'bottleneck_ratio': 1,
        'se_ratio': 0,
    })


def RegNetX_400MF():
    """RegNetX-400MF: deeper/wider than 200MF, still without SE."""
    return RegNet({
        'depths': [1, 2, 7, 12],
        'widths': [32, 64, 160, 384],
        'strides': [1, 1, 2, 2],
        'group_width': 16,
        'bottleneck_ratio': 1,
        'se_ratio': 0,
    })


def RegNetY_400MF():
    """RegNetY-400MF: same shape as X-400MF plus squeeze-excitation."""
    return RegNet({
        'depths': [1, 2, 7, 12],
        'widths': [32, 64, 160, 384],
        'strides': [1, 1, 2, 2],
        'group_width': 16,
        'bottleneck_ratio': 1,
        'se_ratio': 0.25,
    })
def test():
    """Instantiate RegNetX-200MF, print the module tree and the output shape."""
    model = RegNetX_200MF()
    print(model)
    out = model(torch.randn(2, 3, 32, 32))
    print(out.shape)


if __name__ == '__main__':
    test()
| 4,548 | 28.160256 | 106 | py |
CorgiPile-PyTorch | CorgiPile-PyTorch-main/cifar_dl_bench/models/efficientnet.py | '''EfficientNet in PyTorch.
Paper: "EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks".
Reference: https://github.com/keras-team/keras-applications/blob/master/keras_applications/efficientnet.py
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
def swish(x):
    """Swish activation: ``x * sigmoid(x)``."""
    return x * torch.sigmoid(x)
def drop_connect(x, drop_ratio):
    """DropConnect / stochastic depth: randomly zero whole samples in the batch.

    Each example is kept with probability ``1 - drop_ratio`` and scaled by
    ``1 / keep_ratio`` so the expected activation is unchanged.

    Fixed: the previous version mutated ``x`` in place (``div_``/``mul_``),
    silently clobbering the caller's tensor.  The result values are identical;
    the input is now left untouched (callers reassign the return value anyway).
    """
    keep_ratio = 1.0 - drop_ratio
    # One Bernoulli draw per sample, broadcast over C/H/W.
    mask = torch.empty([x.shape[0], 1, 1, 1], dtype=x.dtype, device=x.device)
    mask.bernoulli_(keep_ratio)
    return x / keep_ratio * mask
class SE(nn.Module):
    '''Squeeze-and-Excitation block with Swish.'''

    def __init__(self, in_channels, se_channels):
        super(SE, self).__init__()
        self.se1 = nn.Conv2d(in_channels, se_channels,
                             kernel_size=1, bias=True)
        self.se2 = nn.Conv2d(se_channels, in_channels,
                             kernel_size=1, bias=True)

    def forward(self, x):
        # Global context -> bottleneck with swish -> per-channel sigmoid gate.
        pooled = F.adaptive_avg_pool2d(x, (1, 1))
        gate = self.se2(swish(self.se1(pooled))).sigmoid()
        return x * gate
class Block(nn.Module):
    '''expansion + depthwise + pointwise + squeeze-excitation

    EfficientNet MBConv block.  The padding expression below is only correct
    for kernel_size 3 or 5 (maps 3 -> 1, anything else -> 2).
    '''
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 expand_ratio=1,
                 se_ratio=0.,
                 drop_rate=0.):
        super(Block, self).__init__()
        self.stride = stride
        self.drop_rate = drop_rate
        self.expand_ratio = expand_ratio
        # Expansion
        channels = expand_ratio * in_channels
        self.conv1 = nn.Conv2d(in_channels,
                               channels,
                               kernel_size=1,
                               stride=1,
                               padding=0,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(channels)
        # Depthwise conv
        self.conv2 = nn.Conv2d(channels,
                               channels,
                               kernel_size=kernel_size,
                               stride=stride,
                               padding=(1 if kernel_size == 3 else 2),
                               groups=channels,
                               bias=False)
        self.bn2 = nn.BatchNorm2d(channels)
        # SE layers; bottleneck width is derived from in_channels, not channels.
        se_channels = int(in_channels * se_ratio)
        self.se = SE(channels, se_channels)
        # Output (linear 1x1 projection, no activation after bn3)
        self.conv3 = nn.Conv2d(channels,
                               out_channels,
                               kernel_size=1,
                               stride=1,
                               padding=0,
                               bias=False)
        self.bn3 = nn.BatchNorm2d(out_channels)
        # Skip connection if in and out shapes are the same (MV-V2 style)
        self.has_skip = (stride == 1) and (in_channels == out_channels)
    def forward(self, x):
        # With expand_ratio == 1 the expansion conv is skipped entirely
        # (conv1/bn1 exist but are unused in that case).
        out = x if self.expand_ratio == 1 else swish(self.bn1(self.conv1(x)))
        out = swish(self.bn2(self.conv2(out)))
        out = self.se(out)
        out = self.bn3(self.conv3(out))
        if self.has_skip:
            # Stochastic depth only applies on the residual path during training.
            if self.training and self.drop_rate > 0:
                out = drop_connect(out, self.drop_rate)
            out = out + x
        return out
class EfficientNet(nn.Module):
    """EfficientNet for CIFAR-sized inputs, driven by a config dict.

    ``cfg`` must provide 'expansion', 'out_channels', 'num_blocks',
    'kernel_size', 'stride' (parallel sequences) plus scalar 'dropout_rate'
    and 'drop_connect_rate'.
    """

    def __init__(self, cfg, num_classes=10):
        super(EfficientNet, self).__init__()
        self.cfg = cfg
        # Stride 1 stem (vs. 2 for ImageNet) to suit small inputs.
        self.conv1 = nn.Conv2d(3,
                               32,
                               kernel_size=3,
                               stride=1,
                               padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(32)
        self.layers = self._make_layers(in_channels=32)
        self.linear = nn.Linear(cfg['out_channels'][-1], num_classes)

    def _make_layers(self, in_channels):
        """Build all MBConv stages; drop_connect rate ramps linearly with depth."""
        layers = []
        cfg = [self.cfg[k] for k in ['expansion', 'out_channels', 'num_blocks', 'kernel_size',
                                     'stride']]
        b = 0
        blocks = sum(self.cfg['num_blocks'])
        for expansion, out_channels, num_blocks, kernel_size, stride in zip(*cfg):
            strides = [stride] + [1] * (num_blocks - 1)
            for stride in strides:
                drop_rate = self.cfg['drop_connect_rate'] * b / blocks
                layers.append(
                    Block(in_channels,
                          out_channels,
                          kernel_size,
                          stride,
                          expansion,
                          se_ratio=0.25,
                          drop_rate=drop_rate))
                in_channels = out_channels
                # Bug fix: b was never incremented, so drop_rate stayed 0 for
                # every block and stochastic depth never engaged.
                b += 1
        return nn.Sequential(*layers)

    def forward(self, x):
        out = swish(self.bn1(self.conv1(x)))
        out = self.layers(out)
        # Global average pool, so any spatial size works.
        out = F.adaptive_avg_pool2d(out, 1)
        out = out.view(out.size(0), -1)
        dropout_rate = self.cfg['dropout_rate']
        if self.training and dropout_rate > 0:
            out = F.dropout(out, p=dropout_rate)
        out = self.linear(out)
        return out
def EfficientNetB0():
    """EfficientNet-B0 configuration (CIFAR variant)."""
    return EfficientNet({
        'num_blocks': [1, 2, 2, 3, 3, 4, 1],
        'expansion': [1, 6, 6, 6, 6, 6, 6],
        'out_channels': [16, 24, 40, 80, 112, 192, 320],
        'kernel_size': [3, 3, 5, 3, 5, 5, 3],
        'stride': [1, 2, 2, 2, 1, 2, 1],
        'dropout_rate': 0.2,
        'drop_connect_rate': 0.2,
    })
def test():
    """Smoke-test EfficientNet-B0 on a random CIFAR-sized batch."""
    model = EfficientNetB0()
    batch = torch.randn(2, 3, 32, 32)
    print(model(batch).shape)


if __name__ == '__main__':
    test()
| 5,719 | 31.5 | 106 | py |
CorgiPile-PyTorch | CorgiPile-PyTorch-main/cifar_dl_bench/models/pnasnet.py | '''PNASNet in PyTorch.
Paper: Progressive Neural Architecture Search
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class SepConv(nn.Module):
    '''Separable Convolution.'''

    def __init__(self, in_planes, out_planes, kernel_size, stride):
        super(SepConv, self).__init__()
        # Grouped ("same"-padded) conv; out_planes must be divisible by in_planes.
        self.conv1 = nn.Conv2d(in_planes, out_planes,
                               kernel_size, stride,
                               padding=(kernel_size-1)//2,
                               bias=False, groups=in_planes)
        self.bn1 = nn.BatchNorm2d(out_planes)

    def forward(self, x):
        # Conv then batch norm; no activation here (cells apply ReLU after summing).
        convolved = self.conv1(x)
        return self.bn1(convolved)
class CellA(nn.Module):
    """PNASNet cell A: separable 7x7 conv branch + max-pool branch, summed.

    When stride == 2 the pooled branch gets a 1x1 conv + BN so its channel
    count matches out_planes before the addition.
    """
    def __init__(self, in_planes, out_planes, stride=1):
        super(CellA, self).__init__()
        self.stride = stride
        self.sep_conv1 = SepConv(in_planes, out_planes, kernel_size=7, stride=stride)
        if stride==2:
            self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False)
            self.bn1 = nn.BatchNorm2d(out_planes)
    def forward(self, x):
        y1 = self.sep_conv1(x)
        y2 = F.max_pool2d(x, kernel_size=3, stride=self.stride, padding=1)
        if self.stride==2:
            y2 = self.bn1(self.conv1(y2))
        return F.relu(y1+y2)
class CellB(nn.Module):
    """PNASNet cell B: two summed branch pairs (sep7+sep3, maxpool+sep5),
    concatenated and reduced back to out_planes with a 1x1 conv."""
    def __init__(self, in_planes, out_planes, stride=1):
        super(CellB, self).__init__()
        self.stride = stride
        # Left branch
        self.sep_conv1 = SepConv(in_planes, out_planes, kernel_size=7, stride=stride)
        self.sep_conv2 = SepConv(in_planes, out_planes, kernel_size=3, stride=stride)
        # Right branch
        self.sep_conv3 = SepConv(in_planes, out_planes, kernel_size=5, stride=stride)
        if stride==2:
            # Channel-match the pooled path before summing with sep_conv3.
            self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False)
            self.bn1 = nn.BatchNorm2d(out_planes)
        # Reduce channels
        self.conv2 = nn.Conv2d(2*out_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn2 = nn.BatchNorm2d(out_planes)
    def forward(self, x):
        # Left branch
        y1 = self.sep_conv1(x)
        y2 = self.sep_conv2(x)
        # Right branch
        y3 = F.max_pool2d(x, kernel_size=3, stride=self.stride, padding=1)
        if self.stride==2:
            y3 = self.bn1(self.conv1(y3))
        y4 = self.sep_conv3(x)
        # Concat & reduce channels
        b1 = F.relu(y1+y2)
        b2 = F.relu(y3+y4)
        y = torch.cat([b1,b2], 1)
        return F.relu(self.bn2(self.conv2(y)))
class PNASNet(nn.Module):
    """PNASNet backbone: three stride-1 cell stages separated by stride-2
    downsampling cells; width doubles at each downsample.

    Args:
        cell_type: cell class with signature ``cell_type(in_planes, out_planes, stride)``.
        num_cells: number of cells per stride-1 stage.  Bug fix: this argument
            was previously accepted but ignored (the stages hard-coded 6 cells);
            it is now honored.  Behavior is unchanged for the existing factories,
            which pass 6.
        num_planes: width of the first stage.
    """

    def __init__(self, cell_type, num_cells, num_planes):
        super(PNASNet, self).__init__()
        self.in_planes = num_planes
        self.cell_type = cell_type

        self.conv1 = nn.Conv2d(3, num_planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(num_planes)

        self.layer1 = self._make_layer(num_planes, num_cells=num_cells)
        self.layer2 = self._downsample(num_planes*2)
        self.layer3 = self._make_layer(num_planes*2, num_cells=num_cells)
        self.layer4 = self._downsample(num_planes*4)
        self.layer5 = self._make_layer(num_planes*4, num_cells=num_cells)

        self.linear = nn.Linear(num_planes*4, 10)

    def _make_layer(self, planes, num_cells):
        """Stack ``num_cells`` stride-1 cells at the given width (mutates in_planes)."""
        layers = []
        for _ in range(num_cells):
            layers.append(self.cell_type(self.in_planes, planes, stride=1))
            self.in_planes = planes
        return nn.Sequential(*layers)

    def _downsample(self, planes):
        """One stride-2 cell that changes the width and halves the resolution."""
        layer = self.cell_type(self.in_planes, planes, stride=2)
        self.in_planes = planes
        return layer

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        out = self.layer5(out)
        # avg_pool2d(out, 8) implies 32x32 input (two downsamples: 32 -> 8).
        out = F.avg_pool2d(out, 8)
        out = self.linear(out.view(out.size(0), -1))
        return out
def PNASNetA():
    """PNASNet-A: 6 CellA cells per stage, 44 base planes."""
    return PNASNet(CellA, num_cells=6, num_planes=44)


def PNASNetB():
    """PNASNet-B: 6 CellB cells per stage, 32 base planes."""
    return PNASNet(CellB, num_cells=6, num_planes=32)
def test():
    """Run PNASNet-B on a single random image and print the raw logits."""
    model = PNASNetB()
    logits = model(torch.randn(1, 3, 32, 32))
    print(logits)
# test()
| 4,258 | 32.801587 | 105 | py |
CorgiPile-PyTorch | CorgiPile-PyTorch-main/cifar_dl_bench/models/resnet.py | '''ResNet in PyTorch.
For Pre-activation ResNet, see 'preact_resnet.py'.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
    """ResNet basic block: two 3x3 convs with a (possibly projected) shortcut."""
    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(
            in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)

        # Identity shortcut unless the spatial size or channel count changes.
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion*planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion*planes,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion*planes)
            )

    def forward(self, x):
        residual = self.shortcut(x)
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        return F.relu(out + residual)
class Bottleneck(nn.Module):
    """ResNet bottleneck: 1x1 reduce, 3x3, 1x1 expand (4x), plus shortcut."""
    expansion = 4

    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, self.expansion *
                               planes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion*planes)

        # Projection shortcut when shape changes, identity otherwise.
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion*planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion*planes,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion*planes)
            )

    def forward(self, x):
        residual = self.shortcut(x)
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        return F.relu(out + residual)
class ResNet(nn.Module):
    """ResNet for CIFAR-10: 3x3 stem (stride 1), four stages, linear head.

    ``block`` is BasicBlock or Bottleneck; ``num_blocks`` gives the per-stage
    block counts.  The avg_pool2d(out, 4) implies 32x32 input (three stride-2
    stages: 32 -> 16 -> 8 -> 4).
    """
    def __init__(self, block, num_blocks, num_classes=10):
        super(ResNet, self).__init__()
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512*block.expansion, num_classes)
    def _make_layer(self, block, planes, num_blocks, stride):
        # Only the first block in a stage downsamples; mutates self.in_planes,
        # so stages must be built in order.
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)
    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
def ResNet18():
    """18-layer ResNet (BasicBlock)."""
    return ResNet(BasicBlock, num_blocks=[2, 2, 2, 2])


def ResNet34():
    """34-layer ResNet (BasicBlock)."""
    return ResNet(BasicBlock, num_blocks=[3, 4, 6, 3])


def ResNet50():
    """50-layer ResNet (Bottleneck)."""
    return ResNet(Bottleneck, num_blocks=[3, 4, 6, 3])


def ResNet101():
    """101-layer ResNet (Bottleneck)."""
    return ResNet(Bottleneck, num_blocks=[3, 4, 23, 3])


def ResNet152():
    """152-layer ResNet (Bottleneck)."""
    return ResNet(Bottleneck, num_blocks=[3, 8, 36, 3])
def test():
    """Smoke-test ResNet-18 on one random image and print the output size."""
    model = ResNet18()
    out = model(torch.randn(1, 3, 32, 32))
    print(out.size())
# test()
| 4,218 | 30.721805 | 83 | py |
CorgiPile-PyTorch | CorgiPile-PyTorch-main/cifar_dl_bench/models/dla_simple.py | '''Simplified version of DLA in PyTorch.
Note this implementation is not identical to the original paper version.
But it seems works fine.
See dla.py for the original paper version.
Reference:
Deep Layer Aggregation. https://arxiv.org/abs/1707.06484
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
    """Residual basic block used inside the DLA trees (same as ResNet's)."""
    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(
            in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)

        self.shortcut = nn.Sequential()
        # Project the input when either resolution or channel count changes.
        if stride != 1 or in_planes != self.expansion*planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion*planes,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion*planes)
            )

    def forward(self, x):
        skip = self.shortcut(x)
        h = F.relu(self.bn1(self.conv1(x)))
        h = self.bn2(self.conv2(h))
        return F.relu(h + skip)
class Root(nn.Module):
    """DLA aggregation node: concatenate children along channels, conv-BN-ReLU."""

    def __init__(self, in_channels, out_channels, kernel_size=1):
        super(Root, self).__init__()
        self.conv = nn.Conv2d(
            in_channels, out_channels, kernel_size,
            stride=1, padding=(kernel_size - 1) // 2, bias=False)
        self.bn = nn.BatchNorm2d(out_channels)

    def forward(self, xs):
        # xs: list/tuple of feature maps with identical spatial size; their
        # channel counts must sum to in_channels.
        fused = torch.cat(xs, 1)
        return F.relu(self.bn(self.conv(fused)))
class Tree(nn.Module):
    """Recursive DLA aggregation tree of depth ``level``.

    At level 1 the two children are plain blocks; at higher levels each child
    is itself a Tree one level shallower.  The right child consumes the left
    child's output, and both outputs are fused by the Root.
    """
    def __init__(self, block, in_channels, out_channels, level=1, stride=1):
        super(Tree, self).__init__()
        self.root = Root(2*out_channels, out_channels)
        if level == 1:
            self.left_tree = block(in_channels, out_channels, stride=stride)
            self.right_tree = block(out_channels, out_channels, stride=1)
        else:
            self.left_tree = Tree(block, in_channels,
                                  out_channels, level=level-1, stride=stride)
            self.right_tree = Tree(block, out_channels,
                                   out_channels, level=level-1, stride=1)
    def forward(self, x):
        out1 = self.left_tree(x)
        # Sequential, not parallel: the right subtree refines the left's output.
        out2 = self.right_tree(out1)
        out = self.root([out1, out2])
        return out
class SimpleDLA(nn.Module):
    """Simplified Deep Layer Aggregation network for CIFAR-10.

    Three plain conv stages followed by four aggregation Trees of increasing
    width; avg_pool2d(out, 4) implies 32x32 input (three stride-2 trees:
    32 -> 16 -> 8 -> 4).
    """
    def __init__(self, block=BasicBlock, num_classes=10):
        super(SimpleDLA, self).__init__()
        self.base = nn.Sequential(
            nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(16),
            nn.ReLU(True)
        )
        self.layer1 = nn.Sequential(
            nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(16),
            nn.ReLU(True)
        )
        self.layer2 = nn.Sequential(
            nn.Conv2d(16, 32, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(32),
            nn.ReLU(True)
        )
        self.layer3 = Tree(block, 32, 64, level=1, stride=1)
        self.layer4 = Tree(block, 64, 128, level=2, stride=2)
        self.layer5 = Tree(block, 128, 256, level=2, stride=2)
        self.layer6 = Tree(block, 256, 512, level=1, stride=2)
        self.linear = nn.Linear(512, num_classes)
    def forward(self, x):
        out = self.base(x)
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        out = self.layer5(out)
        out = self.layer6(out)
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
def test():
    """Build SimpleDLA, print the module tree, and print the output size."""
    model = SimpleDLA()
    print(model)
    out = model(torch.randn(1, 3, 32, 32))
    print(out.size())


if __name__ == '__main__':
    test()
| 4,084 | 30.666667 | 83 | py |
CorgiPile-PyTorch | CorgiPile-PyTorch-main/cifar_dl_bench/models/mobilenetv2.py | '''MobileNetV2 in PyTorch.
See the paper "Inverted Residuals and Linear Bottlenecks:
Mobile Networks for Classification, Detection and Segmentation" for more details.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class Block(nn.Module):
    '''expand + depthwise + pointwise'''

    def __init__(self, in_planes, out_planes, expansion, stride):
        super(Block, self).__init__()
        self.stride = stride
        planes = expansion * in_planes
        # 1x1 expansion.
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        # 3x3 depthwise.
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, groups=planes, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # 1x1 linear projection (no activation afterwards).
        self.conv3 = nn.Conv2d(planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn3 = nn.BatchNorm2d(out_planes)

        # Skip path exists only at stride 1; projected when channels differ,
        # identity otherwise.
        self.shortcut = nn.Sequential()
        if stride == 1 and in_planes != out_planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False),
                nn.BatchNorm2d(out_planes),
            )

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        if self.stride == 1:
            out = out + self.shortcut(x)
        return out
class MobileNetV2(nn.Module):
    """MobileNetV2 adapted to CIFAR-10 (reduced strides, 4x4 final pool).

    ``cfg`` rows are (expansion, out_planes, num_blocks, stride); only the
    first block of each group uses the configured stride.
    """
    # (expansion, out_planes, num_blocks, stride)
    cfg = [(1,  16, 1, 1),
           (6,  24, 2, 1),  # NOTE: change stride 2 -> 1 for CIFAR10
           (6,  32, 3, 2),
           (6,  64, 4, 2),
           (6,  96, 3, 1),
           (6, 160, 3, 2),
           (6, 320, 1, 1)]
    def __init__(self, num_classes=10):
        super(MobileNetV2, self).__init__()
        # NOTE: change conv1 stride 2 -> 1 for CIFAR10
        self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(32)
        self.layers = self._make_layers(in_planes=32)
        self.conv2 = nn.Conv2d(320, 1280, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn2 = nn.BatchNorm2d(1280)
        self.linear = nn.Linear(1280, num_classes)
    def _make_layers(self, in_planes):
        # Flatten the cfg table into a single Sequential of inverted-residual blocks.
        layers = []
        for expansion, out_planes, num_blocks, stride in self.cfg:
            strides = [stride] + [1]*(num_blocks-1)
            for stride in strides:
                layers.append(Block(in_planes, out_planes, expansion, stride))
                in_planes = out_planes
        return nn.Sequential(*layers)
    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layers(out)
        out = F.relu(self.bn2(self.conv2(out)))
        # NOTE: change pooling kernel_size 7 -> 4 for CIFAR10
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
def test():
    """Smoke-test MobileNetV2 on a random batch and print the output size."""
    model = MobileNetV2()
    batch = torch.randn(2, 3, 32, 32)
    print(model(batch).size())
# test()
| 3,092 | 34.551724 | 114 | py |
CorgiPile-PyTorch | CorgiPile-PyTorch-main/cifar_dl_bench/models/vgg.py | '''VGG11/13/16/19 in Pytorch.'''
import torch
import torch.nn as nn
# Layer specs per VGG variant, consumed by VGG._make_layers: an int adds a
# 3x3 conv (that many output channels) + BN + ReLU; 'M' adds a 2x2 max-pool.
cfg = {
    'VGG11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'VGG13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'VGG16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    'VGG19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
class VGG(nn.Module):
    """VGG for CIFAR-10: conv feature stack built from the module-level `cfg`
    table, followed by a single 512 -> 10 linear classifier."""

    def __init__(self, vgg_name):
        super(VGG, self).__init__()
        self.features = self._make_layers(cfg[vgg_name])
        self.classifier = nn.Linear(512, 10)

    def forward(self, x):
        feats = self.features(x)
        flat = feats.view(feats.size(0), -1)
        return self.classifier(flat)

    def _make_layers(self, spec):
        layers = []
        channels = 3
        for item in spec:
            if item == 'M':
                layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            else:
                layers.extend([nn.Conv2d(channels, item, kernel_size=3, padding=1),
                               nn.BatchNorm2d(item),
                               nn.ReLU(inplace=True)])
                channels = item
        # Trailing 1x1/stride-1 average pool is an identity op, kept so the
        # module structure (and any saved state_dicts) stays the same.
        layers.append(nn.AvgPool2d(kernel_size=1, stride=1))
        return nn.Sequential(*layers)
def test():
    """Smoke-test VGG11 on a random batch and print the output size."""
    model = VGG('VGG11')
    batch = torch.randn(2, 3, 32, 32)
    print(model(batch).size())
# test()
| 1,442 | 29.0625 | 117 | py |
CorgiPile-PyTorch | CorgiPile-PyTorch-main/cifar_dl_bench/models/densenet.py | '''DenseNet in PyTorch.'''
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class Bottleneck(nn.Module):
    """DenseNet bottleneck layer: BN-ReLU-1x1 (4x growth) then BN-ReLU-3x3,
    with the new ``growth_rate`` feature maps concatenated onto the input."""

    def __init__(self, in_planes, growth_rate):
        super(Bottleneck, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = nn.Conv2d(in_planes, 4*growth_rate, kernel_size=1, bias=False)
        self.bn2 = nn.BatchNorm2d(4*growth_rate)
        self.conv2 = nn.Conv2d(4*growth_rate, growth_rate, kernel_size=3, padding=1, bias=False)

    def forward(self, x):
        new_features = self.conv1(F.relu(self.bn1(x)))
        new_features = self.conv2(F.relu(self.bn2(new_features)))
        # Dense connectivity: new features are prepended to the incoming ones.
        return torch.cat([new_features, x], 1)
class Transition(nn.Module):
    """Between dense blocks: BN-ReLU-1x1 conv to shrink the channel count,
    then a 2x2 average pool to halve the resolution."""

    def __init__(self, in_planes, out_planes):
        super(Transition, self).__init__()
        self.bn = nn.BatchNorm2d(in_planes)
        self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, bias=False)

    def forward(self, x):
        reduced = self.conv(F.relu(self.bn(x)))
        return F.avg_pool2d(reduced, 2)
class DenseNet(nn.Module):
    """DenseNet for CIFAR-10: four dense blocks separated by transitions.

    Channels grow by ``growth_rate`` per layer inside a dense block and are
    compressed by ``reduction`` at each transition.  avg_pool2d(..., 4) at the
    end implies 32x32 input (three 2x transitions: 32 -> 16 -> 8 -> 4).
    """
    def __init__(self, block, nblocks, growth_rate=12, reduction=0.5, num_classes=10):
        super(DenseNet, self).__init__()
        self.growth_rate = growth_rate

        num_planes = 2*growth_rate
        self.conv1 = nn.Conv2d(3, num_planes, kernel_size=3, padding=1, bias=False)

        # num_planes tracks the running channel count through blocks/transitions.
        self.dense1 = self._make_dense_layers(block, num_planes, nblocks[0])
        num_planes += nblocks[0]*growth_rate
        out_planes = int(math.floor(num_planes*reduction))
        self.trans1 = Transition(num_planes, out_planes)
        num_planes = out_planes

        self.dense2 = self._make_dense_layers(block, num_planes, nblocks[1])
        num_planes += nblocks[1]*growth_rate
        out_planes = int(math.floor(num_planes*reduction))
        self.trans2 = Transition(num_planes, out_planes)
        num_planes = out_planes

        self.dense3 = self._make_dense_layers(block, num_planes, nblocks[2])
        num_planes += nblocks[2]*growth_rate
        out_planes = int(math.floor(num_planes*reduction))
        self.trans3 = Transition(num_planes, out_planes)
        num_planes = out_planes

        self.dense4 = self._make_dense_layers(block, num_planes, nblocks[3])
        num_planes += nblocks[3]*growth_rate

        self.bn = nn.BatchNorm2d(num_planes)
        self.linear = nn.Linear(num_planes, num_classes)
    def _make_dense_layers(self, block, in_planes, nblock):
        # Each layer consumes all features produced so far (dense connectivity).
        layers = []
        for i in range(nblock):
            layers.append(block(in_planes, self.growth_rate))
            in_planes += self.growth_rate
        return nn.Sequential(*layers)
    def forward(self, x):
        out = self.conv1(x)
        out = self.trans1(self.dense1(out))
        out = self.trans2(self.dense2(out))
        out = self.trans3(self.dense3(out))
        out = self.dense4(out)
        out = F.avg_pool2d(F.relu(self.bn(out)), 4)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
def DenseNet121():
    """Standard DenseNet-121."""
    return DenseNet(Bottleneck, nblocks=[6, 12, 24, 16], growth_rate=32)


def DenseNet169():
    """Standard DenseNet-169."""
    return DenseNet(Bottleneck, nblocks=[6, 12, 32, 32], growth_rate=32)


def DenseNet201():
    """Standard DenseNet-201."""
    return DenseNet(Bottleneck, nblocks=[6, 12, 48, 32], growth_rate=32)


def DenseNet161():
    """Standard DenseNet-161 (wider growth rate)."""
    return DenseNet(Bottleneck, nblocks=[6, 12, 36, 24], growth_rate=48)


def densenet_cifar():
    """Slim DenseNet-121-shaped model for quick CIFAR experiments."""
    return DenseNet(Bottleneck, nblocks=[6, 12, 24, 16], growth_rate=12)
def test():
    """Run the slim CIFAR DenseNet on one random image and print the logits."""
    model = densenet_cifar()
    logits = model(torch.randn(1, 3, 32, 32))
    print(logits)
# test()
| 3,542 | 31.805556 | 96 | py |
CorgiPile-PyTorch | CorgiPile-PyTorch-main/cifar_dl_bench/models/preact_resnet.py | '''Pre-activation ResNet in PyTorch.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Identity Mappings in Deep Residual Networks. arXiv:1603.05027
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class PreActBlock(nn.Module):
    '''Pre-activation version of the BasicBlock.'''
    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(PreActBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)

        # The shortcut attribute only exists when a projection is required;
        # forward() checks for it with hasattr.
        if stride != 1 or in_planes != self.expansion*planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False)
            )

    def forward(self, x):
        preact = F.relu(self.bn1(x))
        # A projection shortcut branches off the pre-activation, not the raw input.
        skip = self.shortcut(preact) if hasattr(self, 'shortcut') else x
        out = self.conv1(preact)
        out = self.conv2(F.relu(self.bn2(out)))
        return out + skip
class PreActBottleneck(nn.Module):
    '''Pre-activation version of the original Bottleneck module.'''
    expansion = 4

    def __init__(self, in_planes, planes, stride=1):
        super(PreActBottleneck, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)

        # Created only when a projection is needed; forward() probes via hasattr.
        if stride != 1 or in_planes != self.expansion*planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False)
            )

    def forward(self, x):
        preact = F.relu(self.bn1(x))
        skip = self.shortcut(preact) if hasattr(self, 'shortcut') else x
        out = self.conv1(preact)
        out = self.conv2(F.relu(self.bn2(out)))
        out = self.conv3(F.relu(self.bn3(out)))
        return out + skip
class PreActResNet(nn.Module):
    """Pre-activation ResNet for CIFAR-10.

    Unlike the post-activation ResNet, there is no BN/ReLU after the stem —
    each block applies BN-ReLU before its convolutions instead.
    avg_pool2d(out, 4) implies 32x32 input.
    """
    def __init__(self, block, num_blocks, num_classes=10):
        super(PreActResNet, self).__init__()
        self.in_planes = 64

        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512*block.expansion, num_classes)
    def _make_layer(self, block, planes, num_blocks, stride):
        # Only the first block downsamples; mutates self.in_planes, so stages
        # must be constructed in order.
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)
    def forward(self, x):
        out = self.conv1(x)
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
def PreActResNet18():
    """18-layer pre-activation ResNet (PreActBlock)."""
    return PreActResNet(PreActBlock, num_blocks=[2, 2, 2, 2])


def PreActResNet34():
    """34-layer pre-activation ResNet (PreActBlock)."""
    return PreActResNet(PreActBlock, num_blocks=[3, 4, 6, 3])


def PreActResNet50():
    """50-layer pre-activation ResNet (PreActBottleneck)."""
    return PreActResNet(PreActBottleneck, num_blocks=[3, 4, 6, 3])


def PreActResNet101():
    """101-layer pre-activation ResNet (PreActBottleneck)."""
    return PreActResNet(PreActBottleneck, num_blocks=[3, 4, 23, 3])


def PreActResNet152():
    """152-layer pre-activation ResNet (PreActBottleneck)."""
    return PreActResNet(PreActBottleneck, num_blocks=[3, 8, 36, 3])
def test():
    """Smoke-test PreActResNet-18 on one random image; print the output size."""
    model = PreActResNet18()
    out = model(torch.randn(1, 3, 32, 32))
    print(out.size())
# test()
| 4,078 | 33.277311 | 102 | py |
CorgiPile-PyTorch | CorgiPile-PyTorch-main/cifar_dl_bench/models/googlenet.py | '''GoogLeNet with PyTorch.'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class Inception(nn.Module):
    """Inception module with four parallel branches concatenated on channels.

    Note: the "5x5" branch is implemented as two stacked 3x3 convs (same
    receptive field, fewer parameters), so b3 has three conv layers.
    """
    def __init__(self, in_planes, n1x1, n3x3red, n3x3, n5x5red, n5x5, pool_planes):
        super(Inception, self).__init__()
        # 1x1 conv branch
        self.b1 = nn.Sequential(
            nn.Conv2d(in_planes, n1x1, kernel_size=1),
            nn.BatchNorm2d(n1x1),
            nn.ReLU(True),
        )

        # 1x1 conv -> 3x3 conv branch
        self.b2 = nn.Sequential(
            nn.Conv2d(in_planes, n3x3red, kernel_size=1),
            nn.BatchNorm2d(n3x3red),
            nn.ReLU(True),
            nn.Conv2d(n3x3red, n3x3, kernel_size=3, padding=1),
            nn.BatchNorm2d(n3x3),
            nn.ReLU(True),
        )

        # 1x1 conv -> 5x5 conv branch
        self.b3 = nn.Sequential(
            nn.Conv2d(in_planes, n5x5red, kernel_size=1),
            nn.BatchNorm2d(n5x5red),
            nn.ReLU(True),
            nn.Conv2d(n5x5red, n5x5, kernel_size=3, padding=1),
            nn.BatchNorm2d(n5x5),
            nn.ReLU(True),
            nn.Conv2d(n5x5, n5x5, kernel_size=3, padding=1),
            nn.BatchNorm2d(n5x5),
            nn.ReLU(True),
        )

        # 3x3 pool -> 1x1 conv branch
        self.b4 = nn.Sequential(
            nn.MaxPool2d(3, stride=1, padding=1),
            nn.Conv2d(in_planes, pool_planes, kernel_size=1),
            nn.BatchNorm2d(pool_planes),
            nn.ReLU(True),
        )
    def forward(self, x):
        y1 = self.b1(x)
        y2 = self.b2(x)
        y3 = self.b3(x)
        y4 = self.b4(x)
        # Output channels: n1x1 + n3x3 + n5x5 + pool_planes.
        return torch.cat([y1,y2,y3,y4], 1)
class GoogLeNet(nn.Module):
    """GoogLeNet (Inception v1 style) for CIFAR-10, without auxiliary heads.

    AvgPool2d(8) at the end implies 32x32 input (two stride-2 max-pools:
    32 -> 16 -> 8).
    """
    def __init__(self):
        super(GoogLeNet, self).__init__()
        self.pre_layers = nn.Sequential(
            nn.Conv2d(3, 192, kernel_size=3, padding=1),
            nn.BatchNorm2d(192),
            nn.ReLU(True),
        )

        # Inception args: (in, n1x1, n3x3red, n3x3, n5x5red, n5x5, pool_planes);
        # each module's input width is the previous module's concatenated output.
        self.a3 = Inception(192,  64,  96, 128, 16, 32, 32)
        self.b3 = Inception(256, 128, 128, 192, 32, 96, 64)

        self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)

        self.a4 = Inception(480, 192,  96, 208, 16,  48,  64)
        self.b4 = Inception(512, 160, 112, 224, 24,  64,  64)
        self.c4 = Inception(512, 128, 128, 256, 24,  64,  64)
        self.d4 = Inception(512, 112, 144, 288, 32,  64,  64)
        self.e4 = Inception(528, 256, 160, 320, 32, 128, 128)

        self.a5 = Inception(832, 256, 160, 320, 32, 128, 128)
        self.b5 = Inception(832, 384, 192, 384, 48, 128, 128)

        self.avgpool = nn.AvgPool2d(8, stride=1)
        self.linear = nn.Linear(1024, 10)
    def forward(self, x):
        out = self.pre_layers(x)
        out = self.a3(out)
        out = self.b3(out)
        out = self.maxpool(out)
        out = self.a4(out)
        out = self.b4(out)
        out = self.c4(out)
        out = self.d4(out)
        out = self.e4(out)
        out = self.maxpool(out)
        out = self.a5(out)
        out = self.b5(out)
        out = self.avgpool(out)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
def test():
    """Smoke test: forward one random CIFAR-sized image through GoogLeNet."""
    model = GoogLeNet()
    out = model(torch.randn(1, 3, 32, 32))
    print(out.size())
# test()
| 3,221 | 28.833333 | 83 | py |
CorgiPile-PyTorch | CorgiPile-PyTorch-main/cifar_dl_bench/models/resnext.py | '''ResNeXt in PyTorch.
See the paper "Aggregated Residual Transformations for Deep Neural Networks" for more details.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class Block(nn.Module):
    """ResNeXt grouped-convolution bottleneck block.

    1x1 reduce -> 3x3 grouped conv -> 1x1 expand, plus a projection shortcut
    whenever the stride or the channel count changes.
    """

    # Output channels = expansion * cardinality * bottleneck_width.
    expansion = 2

    def __init__(self, in_planes, cardinality=32, bottleneck_width=4, stride=1):
        super(Block, self).__init__()
        width = cardinality * bottleneck_width
        out_planes = self.expansion * width

        self.conv1 = nn.Conv2d(in_planes, width, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(width)
        # Grouped 3x3 conv: each of `cardinality` groups sees its own channel slice.
        self.conv2 = nn.Conv2d(width, width, kernel_size=3, stride=stride,
                               padding=1, groups=cardinality, bias=False)
        self.bn2 = nn.BatchNorm2d(width)
        self.conv3 = nn.Conv2d(width, out_planes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(out_planes)

        if stride == 1 and in_planes == out_planes:
            self.shortcut = nn.Sequential()
        else:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_planes),
            )

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        return F.relu(out + self.shortcut(x))
class ResNeXt(nn.Module):
    """ResNeXt for CIFAR: three stages of grouped-convolution blocks."""

    def __init__(self, num_blocks, cardinality, bottleneck_width, num_classes=10):
        super(ResNeXt, self).__init__()
        self.cardinality = cardinality
        self.bottleneck_width = bottleneck_width
        self.in_planes = 64

        self.conv1 = nn.Conv2d(3, 64, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(num_blocks[0], 1)
        self.layer2 = self._make_layer(num_blocks[1], 2)
        self.layer3 = self._make_layer(num_blocks[2], 2)
        # NOTE: the fourth stage is omitted for 32x32 inputs.
        self.linear = nn.Linear(cardinality * bottleneck_width * 8, num_classes)

    def _make_layer(self, num_blocks, stride):
        """Build one stage; only its first block may downsample."""
        blocks = []
        for s in [stride] + [1] * (num_blocks - 1):
            blocks.append(Block(self.in_planes, self.cardinality, self.bottleneck_width, s))
            self.in_planes = Block.expansion * self.cardinality * self.bottleneck_width
        # Double the bottleneck width for the next stage.
        self.bottleneck_width *= 2
        return nn.Sequential(*blocks)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        for layer in (self.layer1, self.layer2, self.layer3):
            out = layer(out)
        out = F.avg_pool2d(out, 8)
        return self.linear(out.view(out.size(0), -1))
def ResNeXt29_2x64d():
    """ResNeXt-29 with cardinality 2 and 64-d bottlenecks."""
    return ResNeXt(num_blocks=[3, 3, 3], cardinality=2, bottleneck_width=64)


def ResNeXt29_4x64d():
    """ResNeXt-29 with cardinality 4 and 64-d bottlenecks."""
    return ResNeXt(num_blocks=[3, 3, 3], cardinality=4, bottleneck_width=64)


def ResNeXt29_8x64d():
    """ResNeXt-29 with cardinality 8 and 64-d bottlenecks."""
    return ResNeXt(num_blocks=[3, 3, 3], cardinality=8, bottleneck_width=64)


def ResNeXt29_32x4d():
    """ResNeXt-29 with cardinality 32 and 4-d bottlenecks."""
    return ResNeXt(num_blocks=[3, 3, 3], cardinality=32, bottleneck_width=4)
def test_resnext():
    """Smoke test: one forward pass through ResNeXt29_2x64d."""
    model = ResNeXt29_2x64d()
    out = model(torch.randn(1, 3, 32, 32))
    print(out.size())
# test_resnext()
| 3,478 | 35.239583 | 129 | py |
CorgiPile-PyTorch | CorgiPile-PyTorch-main/cifar_dl_bench/models/senet.py | '''SENet in PyTorch.
SENet is the winner of ImageNet-2017. The paper is not released yet.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
    """SE-ResNet basic block: two 3x3 convs with a squeeze-and-excitation gate.

    The SE path global-average-pools the residual output, squeezes channels
    16x through a 1x1 conv, re-expands, and rescales the feature map with a
    per-channel sigmoid gate before the shortcut is added.
    """

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)

        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != planes:
            # Projection shortcut when the spatial size or channel count changes.
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes)
            )

        # SE layers: 1x1 convs act as per-channel fully-connected layers.
        self.fc1 = nn.Conv2d(planes, planes//16, kernel_size=1)  # Use nn.Conv2d instead of nn.Linear
        self.fc2 = nn.Conv2d(planes//16, planes, kernel_size=1)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        # Squeeze: global average pool down to a 1x1 channel descriptor.
        w = F.avg_pool2d(out, out.size(2))
        w = F.relu(self.fc1(w))
        # torch.sigmoid replaces the deprecated F.sigmoid (same math, no warning).
        w = torch.sigmoid(self.fc2(w))
        # Excitation: rescale each channel (broadcasts over H and W).
        out = out * w
        out += self.shortcut(x)
        out = F.relu(out)
        return out
class PreActBlock(nn.Module):
    """Pre-activation SE block: BN/ReLU precede each conv.

    The `shortcut` attribute only exists when a projection is required; the
    forward pass falls back to the identity otherwise.
    """

    def __init__(self, in_planes, planes, stride=1):
        super(PreActBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)

        if stride != 1 or in_planes != planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=False)
            )

        # SE layers: 1x1 convs act as per-channel fully-connected layers.
        self.fc1 = nn.Conv2d(planes, planes//16, kernel_size=1)
        self.fc2 = nn.Conv2d(planes//16, planes, kernel_size=1)

    def forward(self, x):
        out = F.relu(self.bn1(x))
        # The projection shortcut taps the pre-activated input.
        shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x
        out = self.conv1(out)
        out = self.conv2(F.relu(self.bn2(out)))
        # Squeeze: global average pool down to a 1x1 channel descriptor.
        w = F.avg_pool2d(out, out.size(2))
        w = F.relu(self.fc1(w))
        # torch.sigmoid replaces the deprecated F.sigmoid (same math, no warning).
        w = torch.sigmoid(self.fc2(w))
        # Excitation: rescale each channel, then add the shortcut.
        out = out * w
        out += shortcut
        return out
class SENet(nn.Module):
    """SENet for CIFAR-10: a ResNet-18-shaped stack of SE blocks."""

    def __init__(self, block, num_blocks, num_classes=10):
        super(SENet, self).__init__()
        self.in_planes = 64

        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        # Four stages; all but the first halve the spatial resolution.
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        """Stack `num_blocks` blocks; only the first one uses `stride`."""
        layers = []
        for s in [stride] + [1] * (num_blocks - 1):
            layers.append(block(self.in_planes, planes, s))
            self.in_planes = planes
        return nn.Sequential(*layers)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            out = stage(out)
        out = F.avg_pool2d(out, 4)
        return self.linear(out.view(out.size(0), -1))
def SENet18():
    """SE-ResNet-18: two pre-activation SE blocks per stage."""
    return SENet(PreActBlock, [2, 2, 2, 2])
def test():
    """Smoke test: forward one random CIFAR-sized input through SENet18."""
    out = SENet18()(torch.randn(1, 3, 32, 32))
    print(out.size())
# test()
| 4,027 | 32.016393 | 102 | py |
CorgiPile-PyTorch | CorgiPile-PyTorch-main/cifar_dl_bench/models/shufflenet.py | '''ShuffleNet in PyTorch.
See the paper "ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices" for more details.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class ShuffleBlock(nn.Module):
    """Channel shuffle: interleave channels across `groups` groups.

    [N,C,H,W] -> [N,g,C/g,H,W] -> swap the group and per-group channel axes
    -> [N,C,H,W], so channels from different groups get mixed.
    """

    def __init__(self, groups):
        super(ShuffleBlock, self).__init__()
        self.groups = groups

    def forward(self, x):
        n, c, h, w = x.size()
        per_group = c // self.groups
        shuffled = x.view(n, self.groups, per_group, h, w).permute(0, 2, 1, 3, 4)
        return shuffled.reshape(n, c, h, w)
class Bottleneck(nn.Module):
    """ShuffleNet unit: grouped 1x1 -> channel shuffle -> 3x3 depthwise -> grouped 1x1.

    With stride 2 the input is average-pooled and concatenated onto the conv
    output (the caller passes a correspondingly reduced `out_planes`); with
    stride 1 a plain residual addition is used.
    """

    def __init__(self, in_planes, out_planes, stride, groups):
        super(Bottleneck, self).__init__()
        self.stride = stride

        # Integer division: nn.Conv2d requires int channel counts, and
        # `out_planes/4` is a float under Python 3 (TypeError at construction).
        mid_planes = out_planes // 4
        # The very first unit (24 input channels) uses an ungrouped 1x1 conv.
        g = 1 if in_planes == 24 else groups
        self.conv1 = nn.Conv2d(in_planes, mid_planes, kernel_size=1, groups=g, bias=False)
        self.bn1 = nn.BatchNorm2d(mid_planes)
        self.shuffle1 = ShuffleBlock(groups=g)
        # Depthwise 3x3 conv (groups == channels).
        self.conv2 = nn.Conv2d(mid_planes, mid_planes, kernel_size=3, stride=stride, padding=1, groups=mid_planes, bias=False)
        self.bn2 = nn.BatchNorm2d(mid_planes)
        self.conv3 = nn.Conv2d(mid_planes, out_planes, kernel_size=1, groups=groups, bias=False)
        self.bn3 = nn.BatchNorm2d(out_planes)

        self.shortcut = nn.Sequential()
        if stride == 2:
            # Downsampling shortcut; its output is concatenated, not added.
            self.shortcut = nn.Sequential(nn.AvgPool2d(3, stride=2, padding=1))

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.shuffle1(out)
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        res = self.shortcut(x)
        out = F.relu(torch.cat([out, res], 1)) if self.stride == 2 else F.relu(out + res)
        return out
class ShuffleNet(nn.Module):
    """ShuffleNet for CIFAR: three stages of shuffle units after a 1x1 stem."""

    def __init__(self, cfg):
        super(ShuffleNet, self).__init__()
        out_planes = cfg['out_planes']
        num_blocks = cfg['num_blocks']
        groups = cfg['groups']

        self.conv1 = nn.Conv2d(3, 24, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(24)
        self.in_planes = 24
        self.layer1 = self._make_layer(out_planes[0], num_blocks[0], groups)
        self.layer2 = self._make_layer(out_planes[1], num_blocks[1], groups)
        self.layer3 = self._make_layer(out_planes[2], num_blocks[2], groups)
        self.linear = nn.Linear(out_planes[2], 10)

    def _make_layer(self, out_planes, num_blocks, groups):
        """One stage: only the first unit downsamples (concat shortcut)."""
        layers = []
        for i in range(num_blocks):
            first = i == 0
            # The first unit's concat shortcut contributes `in_planes`
            # channels, so its conv path produces correspondingly fewer.
            layers.append(Bottleneck(self.in_planes,
                                     out_planes - (self.in_planes if first else 0),
                                     stride=2 if first else 1,
                                     groups=groups))
            self.in_planes = out_planes
        return nn.Sequential(*layers)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3):
            out = stage(out)
        out = F.avg_pool2d(out, 4)
        return self.linear(out.view(out.size(0), -1))
def ShuffleNetG2():
    """ShuffleNet with 2 convolution groups."""
    return ShuffleNet({
        'out_planes': [200, 400, 800],
        'num_blocks': [4, 8, 4],
        'groups': 2,
    })
def ShuffleNetG3():
    """ShuffleNet with 3 convolution groups."""
    return ShuffleNet({
        'out_planes': [240, 480, 960],
        'num_blocks': [4, 8, 4],
        'groups': 3,
    })
def test():
    """Smoke test: forward one random input through ShuffleNetG2."""
    model = ShuffleNetG2()
    out = model(torch.randn(1, 3, 32, 32))
    print(out)
# test()
| 3,542 | 31.209091 | 126 | py |
CorgiPile-PyTorch | CorgiPile-PyTorch-main/cifar_dl_bench/models/lenet.py | '''LeNet in PyTorch.'''
import torch.nn as nn
import torch.nn.functional as F
class LeNet(nn.Module):
    """Classic LeNet-5 adapted to 3-channel 32x32 inputs and 10 classes."""

    def __init__(self):
        super(LeNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        # Two conv/pool stages shrink 32x32 down to 5x5 feature maps.
        out = F.max_pool2d(F.relu(self.conv1(x)), 2)
        out = F.max_pool2d(F.relu(self.conv2(out)), 2)
        out = out.view(out.size(0), -1)
        # Two hidden fully-connected layers, then the class scores.
        out = F.relu(self.fc1(out))
        out = F.relu(self.fc2(out))
        return self.fc3(out)
| 699 | 28.166667 | 43 | py |
CorgiPile-PyTorch | CorgiPile-PyTorch-main/cifar_dl_bench/models/mobilenet.py | '''MobileNet in PyTorch.
See the paper "MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications"
for more details.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class Block(nn.Module):
    """MobileNet unit: depthwise 3x3 conv followed by a pointwise 1x1 conv."""

    def __init__(self, in_planes, out_planes, stride=1):
        super(Block, self).__init__()
        # Depthwise: one filter per input channel (groups == in_planes).
        self.conv1 = nn.Conv2d(in_planes, in_planes, kernel_size=3, stride=stride, padding=1, groups=in_planes, bias=False)
        self.bn1 = nn.BatchNorm2d(in_planes)
        # Pointwise: 1x1 conv mixes channels.
        self.conv2 = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn2 = nn.BatchNorm2d(out_planes)

    def forward(self, x):
        depthwise = F.relu(self.bn1(self.conv1(x)))
        return F.relu(self.bn2(self.conv2(depthwise)))
class MobileNet(nn.Module):
    """MobileNet v1 for CIFAR-10 (strides reduced for 32x32 inputs)."""

    # An int entry means (planes, stride=1); a tuple is (planes, stride).
    cfg = [64, (128,2), 128, (256,2), 256, (512,2), 512, 512, 512, 512, 512, (1024,2), 1024]

    def __init__(self, num_classes=10):
        super(MobileNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(32)
        self.layers = self._make_layers(in_planes=32)
        self.linear = nn.Linear(1024, num_classes)

    def _make_layers(self, in_planes):
        """Expand `cfg` into a chain of depthwise-separable blocks."""
        blocks = []
        for entry in self.cfg:
            if isinstance(entry, int):
                planes, stride = entry, 1
            else:
                planes, stride = entry
            blocks.append(Block(in_planes, planes, stride))
            in_planes = planes
        return nn.Sequential(*blocks)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layers(out)
        out = F.avg_pool2d(out, 2)
        return self.linear(out.view(out.size(0), -1))
def test():
    """Smoke test: forward a random CIFAR-sized input through MobileNet."""
    out = MobileNet()(torch.randn(1, 3, 32, 32))
    print(out.size())
# test()
| 2,025 | 31.677419 | 123 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.