"""GPT-2 building blocks (LayerNorm, Conv1D, Attention, MLP, Block, Transformer) and a state-dict loader."""
import math
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter

def load_weight(model, state_dict):
    metadata = getattr(state_dict, "_metadata", None)
    state_dict = state_dict.copy()  # shallow copy: _load_from_state_dict pops keys
    missing_keys, unexpected_keys, error_msgs = [], [], []

    def load(module, prefix=""):
        local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
        module._load_from_state_dict(state_dict, prefix, local_metadata, True,
                                     missing_keys, unexpected_keys, error_msgs)
        for name, child in module._modules.items():
            if child is not None:
                load(child, prefix + name + ".")

    # Checkpoints saved from the bare transformer lack the "transformer." key prefix.
    start_model = model
    if hasattr(model, "transformer") and all(
            not s.startswith("transformer.") for s in state_dict.keys()):
        start_model = model.transformer
    load(start_model, prefix="")
    model.set_tied()  # re-tie the input and output embeddings after loading
    return model
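# Hedged usage sketch: the checkpoint filename and the wrapper passed in
# (something with a `.transformer` child and a `.set_tied()` method) are
# assumptions for illustration; only `load_weight` itself appears above.
def restore_from_checkpoint(model, path="gpt2-pytorch_model.bin"):
    state_dict = torch.load(path, map_location="cpu")
    return load_weight(model, state_dict).eval()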
class LayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-12):
        """Construct a layernorm module in the TF style (epsilon inside the square root)."""
        super(LayerNorm, self).__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.bias = nn.Parameter(torch.zeros(hidden_size))
        self.variance_epsilon = eps

    def forward(self, x):
        u = x.mean(-1, keepdim=True)                # mean over the hidden dimension
        s = (x - u).pow(2).mean(-1, keepdim=True)   # biased variance
        x = (x - u) / torch.sqrt(s + self.variance_epsilon)
        return self.weight * x + self.bias
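# Sanity-check sketch (an assumption, not part of the original listing): with
# the default affine parameters this should agree with torch's built-in
# LayerNorm, which likewise adds epsilon to the variance inside the square root.
def _check_layernorm():
    x = torch.randn(2, 5, 768)  # (batch, seq, hidden)
    torch.testing.assert_close(LayerNorm(768, eps=1e-5)(x),
                               nn.LayerNorm(768, eps=1e-5)(x),
                               rtol=1e-4, atol=1e-5)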
class Conv1D(nn.Module):
    """Acts like a linear layer with the weight stored transposed (named after the 1D conv in the original TF code)."""
    def __init__(self, nf, nx):
        super(Conv1D, self).__init__()
        self.nf = nf
        w = torch.empty(nx, nf)
        nn.init.normal_(w, std=0.02)
        self.weight = Parameter(w)
        self.bias = Parameter(torch.zeros(nf))

    def forward(self, x):
        size_out = x.size()[:-1] + (self.nf,)
        # addmm: bias + (tokens, nx) @ (nx, nf), then restore the leading dims
        x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight)
        return x.view(*size_out)
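# Equivalence sketch (an illustration I am adding, not from the listing):
# Conv1D(nf, nx) computes the same function as nn.Linear(nx, nf) once the
# weight matrix is copied across transposed.
def _check_conv1d():
    c, lin = Conv1D(12, 4), nn.Linear(4, 12)
    lin.weight.data = c.weight.data.t()  # (nf, nx) -> (nx, nf) storage difference
    lin.bias.data = c.bias.data
    x = torch.randn(3, 7, 4)
    torch.testing.assert_close(c(x), lin(x))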
class Attention(nn.Module):
    def __init__(self, nx, n_ctx, config, scale=False):
        super(Attention, self).__init__()
        n_state = nx  # in GPT-2, n_state == n_embd
        assert n_state % config.n_head == 0
        # Lower-triangular causal mask, broadcast over (batch, head).
        self.register_buffer("bias", torch.tril(torch.ones(n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx))
        self.n_head = config.n_head
        self.split_size = n_state
        self.scale = scale
        self.c_attn = Conv1D(n_state * 3, nx)  # fused query/key/value projection
        self.c_proj = Conv1D(n_state, nx)      # output projection

    def _attn(self, q, k, v):
        w = torch.matmul(q, k)
        if self.scale:
            w = w / math.sqrt(v.size(-1))
        nd, ns = w.size(-2), w.size(-1)
        b = self.bias[:, :, ns - nd:ns, :ns]   # causal mask for the current window
        w = w * b - 1e10 * (1 - b)             # masked positions get a large negative score
        w = nn.Softmax(dim=-1)(w)
        return torch.matmul(w, v)

    def merge_heads(self, x):
        x = x.permute(0, 2, 1, 3).contiguous()
        new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
        return x.view(*new_x_shape)  # back to (batch, seq, n_state)

    def split_heads(self, x, k=False):
        new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
        x = x.view(*new_x_shape)
        if k:
            return x.permute(0, 2, 3, 1)  # keys: (batch, head, head_dim, seq)
        return x.permute(0, 2, 1, 3)      # (batch, head, seq, head_dim)

    def forward(self, x, layer_past=None):
        x = self.c_attn(x)
        query, key, value = x.split(self.split_size, dim=2)
        query = self.split_heads(query)
        key = self.split_heads(key, k=True)
        value = self.split_heads(value)
        if layer_past is not None:
            past_key, past_value = layer_past[0].transpose(-2, -1), layer_past[1]
            key = torch.cat((past_key, key), dim=-1)
            value = torch.cat((past_value, value), dim=-2)
        # Stack with key transposed so key and value share a shape in the cache.
        present = torch.stack((key.transpose(-2, -1), value))
        a = self._attn(query, key, value)
        a = self.merge_heads(a)
        a = self.c_proj(a)
        return a, present
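# Incremental-decoding sketch (shapes are my reading of the code above; the
# minimal `Cfg` stands in for the real config object): the `present` cache
# returned by one step feeds the next step's `layer_past`.
def _check_attention_cache():
    class Cfg:
        n_head = 12
    attn = Attention(nx=768, n_ctx=1024, config=Cfg(), scale=True)
    out, present = attn(torch.randn(1, 4, 768))        # full prefix of 4 tokens
    out2, present2 = attn(torch.randn(1, 1, 768), layer_past=present)
    assert out2.shape == (1, 1, 768)
    assert present2.shape[-2] == 5                     # cache grew by one position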
def gelu(x):
    # GPT-2's tanh approximation of the GELU activation (referenced by MLP below)
    return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))

class MLP(nn.Module):
    def __init__(self, n_state, config):  # n_state is 4 * n_embd in GPT-2
        super(MLP, self).__init__()
        nx = config.n_embd
        self.c_fc = Conv1D(n_state, nx)    # expand: nx -> n_state
        self.c_proj = Conv1D(nx, n_state)  # project back: n_state -> nx
        self.act = gelu

    def forward(self, x):
        h = self.act(self.c_fc(x))
        return self.c_proj(h)
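# Aside (assumes PyTorch >= 1.12): the hand-rolled gelu above should match the
# built-in tanh-approximate GELU, so it could be swapped for the library call.
def _check_gelu():
    import torch.nn.functional as F
    x = torch.randn(100)
    torch.testing.assert_close(gelu(x), F.gelu(x, approximate="tanh"))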
class Block(nn.Module):
    def __init__(self, n_ctx, config, scale=False):
        super(Block, self).__init__()
        nx = config.n_embd
        self.ln_1 = LayerNorm(nx, eps=config.layer_norm_epsilon)
        self.attn = Attention(nx, n_ctx, config, scale)
        self.ln_2 = LayerNorm(nx, eps=config.layer_norm_epsilon)
        self.mlp = MLP(4 * nx, config)

    def forward(self, x, layer_past=None):
        # Pre-norm residual layout: x + Attn(LN(x)), then x + MLP(LN(x)).
        a, present = self.attn(self.ln_1(x), layer_past=layer_past)
        x = x + a
        m = self.mlp(self.ln_2(x))
        x = x + m
        return x, present
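# Smoke-test sketch; `Cfg` carries exactly the fields the classes above read,
# with values chosen for illustration.
def _check_block():
    class Cfg:
        n_embd = 768
        n_head = 12
        layer_norm_epsilon = 1e-5
    y, present = Block(n_ctx=1024, config=Cfg(), scale=True)(torch.randn(2, 10, 768))
    assert y.shape == (2, 10, 768)  # residual blocks preserve the hidden size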
class Transformer(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.wte = nn.Embedding(config.vocab_size, config.n_embd)  # token embedding table
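# The listing breaks off here. As a hedged sketch only (the standard GPT-2
# layout, not recovered from the listing): the constructor typically continues
# with position embeddings, the stack of Blocks, and a final LayerNorm.
class TransformerSketch(Transformer):
    def __init__(self, config):
        super().__init__(config)
        self.wpe = nn.Embedding(config.n_positions, config.n_embd)  # assumed field names
        self.h = nn.ModuleList(
            [Block(config.n_ctx, config, scale=True) for _ in range(config.n_layer)])
        self.ln_f = LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)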