Instructions to use nateraw/basic-ae-cifar10 with libraries and notebooks. Follow these links to get started.
- Libraries
- Transformers
How to use nateraw/basic-ae-cifar10 with Transformers:

# Load model directly
from transformers import AutoModel
model = AutoModel.from_pretrained("nateraw/basic-ae-cifar10", dtype="auto")

- Notebooks
- Google Colab
- Kaggle
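The auto-generated snippet above assumes a standard Transformers checkpoint. This repo's config.json instead carries a `_src` pointer to `src.model.Autoencoder` (see the diff below), so the intended load path is likely the mixin one. A minimal sketch, assuming `auto_anything`'s `ModelHubMixin` exposes a `from_pretrained` classmethod like `huggingface_hub`'s mixin does, and that `hf_src/src` is on the Python path:

# Hypothetical alternative load path via the mixin, not a documented API:
# assumes auto_anything's ModelHubMixin provides from_pretrained(), mirroring
# huggingface_hub.ModelHubMixin.
from src.model import Autoencoder  # the class defined in this commit

model = Autoencoder.from_pretrained("nateraw/basic-ae-cifar10")
model.eval()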
Commit 7eaf8a6 · Parent(s): 31484bc
:tada: init
Files changed:
- config.json +1 -0
- hf_src/src/__init__.py +0 -0
- hf_src/src/model.py +65 -0
- pytorch_model.bin +3 -0
config.json ADDED
@@ -0,0 +1 @@
+{"input_dim": 3072, "hidden_dims": [256, 64, 16, 4, 2], "_src": {"module_name": "src.model", "member_name": "Autoencoder"}}
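Here `input_dim` is 3072 because each 3×32×32 CIFAR-10 image is flattened to a 3·32·32 = 3072-dimensional vector, and `hidden_dims` narrows it to a 2-d bottleneck; `_src` tells the loader which class to import. A small sketch (a hypothetical helper, not part of the repo) that derives the encoder and decoder layer widths from this config:

import json

# Hypothetical helper: derive layer widths from the config above.
with open("config.json") as f:
    cfg = json.load(f)

dims = [cfg["input_dim"]] + cfg["hidden_dims"]               # [3072, 256, 64, 16, 4, 2]
encoder_layers = list(zip(dims[:-1], dims[1:]))              # (3072, 256), ..., (4, 2)
decoder_layers = list(zip(dims[::-1][:-1], dims[::-1][1:]))  # (2, 4), ..., (256, 3072)
print(encoder_layers)
print(decoder_layers)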
hf_src/src/__init__.py ADDED
File without changes
hf_src/src/model.py ADDED
@@ -0,0 +1,65 @@
+from argparse import Namespace
+from typing import Union, List, Tuple
+
+import torch.nn.functional as F
+from auto_anything import ModelHubMixin
+from torch import nn
+
+
+class Dense(nn.Module):
+
+    def __init__(self, input_dim, output_dim, bias=True, activation=nn.LeakyReLU, **kwargs):
+        super().__init__()
+        self.fc = nn.Linear(input_dim, output_dim, bias=bias)
+        nn.init.xavier_uniform_(self.fc.weight)
+        nn.init.constant_(self.fc.bias, 0.0)
+        self.activation = activation(**kwargs) if activation is not None else None
+
+    def forward(self, x):
+        if self.activation is None:
+            return self.fc(x)
+        return self.activation(self.fc(x))
+
+
+class Encoder(nn.Module):
+    def __init__(self, input_dim, *dims):
+        super().__init__()
+        dims = (input_dim,) + dims
+        self.layers = nn.Sequential(
+            *[Dense(dims[i], dims[i+1], negative_slope=0.4, inplace=True) for i in range(len(dims) - 1)]
+        )
+    def forward(self, x):
+        return self.layers(x)
+
+
+class Decoder(nn.Module):
+    def __init__(self, output_dim, *dims):
+        super().__init__()
+        self.layers = nn.Sequential(
+            *[Dense(dims[i], dims[i + 1], negative_slope=0.4, inplace=True) for i in range(len(dims) - 1)]
+            + [Dense(dims[-1], output_dim, activation=nn.Sigmoid)]
+        )
+    def forward(self, x):
+        return self.layers(x)
+
+
+class Autoencoder(nn.Module, ModelHubMixin):
+
+    def __init__(self, input_dim: int = 784, hidden_dims: Tuple[int] = (256, 64, 16, 4, 2)):
+        super().__init__()
+        self.config = Namespace(input_dim=input_dim, hidden_dims=hidden_dims)
+        self.encoder = Encoder(self.config.input_dim, *self.config.hidden_dims)
+        self.decoder = Decoder(self.config.input_dim, *reversed(self.config.hidden_dims))
+
+    def forward(self, x):
+        x = x.flatten(1)
+        latent = self.encoder(x)
+        recon = self.decoder(latent)
+        loss = F.mse_loss(recon, x)
+        return recon, latent, loss
+
+    def save_pretrained(self, save_directory, **kwargs):
+        # assert 'config' not in kwargs, \
+        #     "save_pretrained handles passing model config for you, please dont pass it"
+        super().save_pretrained(save_directory, config=self.config.__dict__, **kwargs)
+        # super().save_pretrained(save_directory, **kwargs)
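The forward pass flattens the batch, encodes it down to the bottleneck, decodes it back, and returns the reconstruction, latent code, and MSE loss in one call. A minimal smoke test of the class above, assuming `auto_anything` is installed and `hf_src/src` is importable:

import torch
from src.model import Autoencoder

# Match the shipped config: 3 x 32 x 32 CIFAR-10 images, 2-d bottleneck.
model = Autoencoder(input_dim=3072, hidden_dims=(256, 64, 16, 4, 2))
x = torch.rand(8, 3, 32, 32)  # fake batch in [0, 1], like scaled CIFAR-10 images
recon, latent, loss = model(x)
print(recon.shape)   # torch.Size([8, 3072]) -- flattened reconstruction
print(latent.shape)  # torch.Size([8, 2])    -- bottleneck code
print(loss.item())   # scalar MSE between recon and the flattened input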
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a1a84e76ad111059570cb96d7ab805d26c759b83353c550630cf93caef811d2a
+size 6453930
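The weights file is stored via Git LFS, so the commit records only this pointer (hash plus size, about 6.5 MB). A hedged sketch of loading the downloaded file by hand, assuming it holds a plain state_dict (the usual layout for a pytorch_model.bin):

import torch
from src.model import Autoencoder

model = Autoencoder(input_dim=3072, hidden_dims=(256, 64, 16, 4, 2))
# Assumption: pytorch_model.bin is a plain state_dict, not a wrapped checkpoint.
state_dict = torch.load("pytorch_model.bin", map_location="cpu")
model.load_state_dict(state_dict)
model.eval()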