Commit 2ee6646
Parent(s): e74dfc1

Upload Adapter

Files changed:
- config.json (+6, -1)
- model.py (+61, -0)
- pytorch_model.bin (+3, -0)
config.json CHANGED
@@ -1,7 +1,12 @@
 {
+  "architectures": [
+    "Adapter"
+  ],
   "auto_map": {
-    "AutoConfig": "config.AdapterConfig"
+    "AutoConfig": "config.AdapterConfig",
+    "AutoModel": "model.Adapter"
   },
   "model_type": "archinetai/adapter-A-v1",
+  "torch_dtype": "float32",
   "transformers_version": "4.24.0"
 }
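With both AutoConfig and AutoModel mapped to code shipped in the repository, the adapter can be loaded through the transformers auto classes. A minimal sketch, assuming the repo id matches the model_type above; trust_remote_code=True is required because config.py and model.py are executed from the repo:

from transformers import AutoModel

# Downloads config.json, config.py, model.py and the weights, then builds
# model.Adapter as declared in auto_map.
model = AutoModel.from_pretrained("archinetai/adapter-A-v1", trust_remote_code=True)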
model.py ADDED
@@ -0,0 +1,61 @@
+import torch
+from torch import Tensor, nn
+from transformers import PreTrainedModel
+from .config import AdapterConfig
+
+
+class Model(nn.Module):
+
+    def __init__(
+        self,
+        num_channels: int,
+        num_filters: int,
+        window_length: int,
+        stride: int,
+    ):
+        super().__init__()
+        self.stride = stride
+        padding = window_length // 2 - stride // 2
+        self.conv = nn.Conv1d(
+            in_channels=num_channels,
+            out_channels=num_filters,
+            kernel_size=window_length,
+            stride=stride,
+            padding=padding,
+            padding_mode="reflect",
+            bias=False,
+        )
+        self.decode = nn.ConvTranspose1d(
+            in_channels=num_filters,
+            out_channels=num_channels,
+            kernel_size=window_length,
+            stride=stride,
+            padding=padding,
+            bias=False,
+        )
+
+    def encode(self, x: Tensor) -> Tensor:
+        return torch.tanh(self.conv(x))
+
+
+class Adapter(PreTrainedModel):
+
+    config_class = AdapterConfig
+
+    def __init__(self, config: AdapterConfig):
+        super().__init__(config)
+
+        self.model = Model(
+            num_channels=2,
+            num_filters=128,
+            window_length=128,
+            stride=64
+        )
+
+    def encode(self, x):
+        return self.model.encode(x)
+
+    def decode(self, x):
+        return self.model.decode(x)
+
+
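Adapter wraps a single-layer learned filterbank: encode is a strided, reflect-padded Conv1d followed by tanh, and decode is a ConvTranspose1d with matching kernel, stride, and padding. With the hard-coded hyperparameters (2 channels, 128 filters, window 128, stride 64), the shapes work out as below. A minimal sketch; the (batch, channels, samples) waveform layout and the repo id are assumptions, and the sample count should be a multiple of the stride so lengths round-trip exactly:

import torch
from transformers import AutoModel

model = AutoModel.from_pretrained("archinetai/adapter-A-v1", trust_remote_code=True)

x = torch.randn(1, 2, 4096)  # assumed layout: (batch, channels=2, samples)

# Conv1d: padding = 128 // 2 - 64 // 2 = 32, so the latent length is
# (4096 + 2 * 32 - 128) // 64 + 1 = 64.
z = model.encode(x)          # -> (1, 128, 64), tanh-bounded in (-1, 1)

# ConvTranspose1d inverts the hop: (64 - 1) * 64 - 2 * 32 + 128 = 4096.
y = model.decode(z)          # -> (1, 2, 4096)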
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e428e6cb74fc8e479aed1bb50537e1be4fe06202815abd4562245d639d410185
+size 263143
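The checkpoint itself is stored through Git LFS; what the commit adds is the three-line pointer (spec version, SHA-256 of the blob, byte size). The reported size is consistent with the model above. A quick sanity check, assuming float32 weights per the config; the remainder is container framing, which varies:

# With bias=False, the state dict holds exactly two weight tensors,
# both of shape (128, 2, 128):
params = 128 * 2 * 128 + 128 * 2 * 128  # 65,536 parameters
payload = params * 4                    # 262,144 bytes of float32 data
# 263,143 (pointer size) - 262,144 leaves ~1 KB for the pickle/zip container.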