Upload pipeline.py with huggingface_hub
pipeline.py (ADDED, +106 -0)
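
The commit message says the file was uploaded with huggingface_hub. A minimal sketch of such an upload (the repo_id is a placeholder, not taken from this commit):

from huggingface_hub import HfApi

api = HfApi()  # picks up the token from a prior `huggingface-cli login`
api.upload_file(
    path_or_fileobj="pipeline.py",
    path_in_repo="pipeline.py",
    repo_id="user/repo",  # hypothetical repository id
    commit_message="Upload pipeline.py with huggingface_hub",
)
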
@@ -0,0 +1,106 @@
from typing import Dict, List, Union
import os
import torch
import torch.nn as nn
import numpy as np
from torch.nn import TransformerEncoder, TransformerEncoderLayer


class PreTrainedPipeline():

    def __init__(self, path="./", device=(torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu"))):
        # Preload everything needed at inference (model weights and the
        # normalization statistics). This runs once, so all heavy I/O lives here.
        self.device = device
        self.mean = np.load(os.path.join(path, "train_mean.npy"))
        self.std = np.load(os.path.join(path, "train_std.npy"))
        self.selected_keys = (
            "sex", "has_photo", "has_mobile", "followers_count", "relation", "life_main", "people_main"
        )
        # Architecture hyperparameters; they must match the checkpoint.
        # BATCH_SIZE, MAX_LR and EPOCHS are training-time settings kept here
        # for reference only; they are unused at inference.
        BATCH_SIZE = 1024
        MAX_LR = 1e-4
        EPOCHS = 50
        D_MODEL = 512
        NHEAD = 16
        NUM_LAYERS = 32
        DROPOUT = 0.2
        self.model = TabularTransformer(
            input_dim=7,
            d_model=D_MODEL,
            nhead=NHEAD,
            num_layers=NUM_LAYERS,
            dropout=DROPOUT
        ).to(self.device)
        self.model.load_state_dict(torch.load(os.path.join(path, "pytorch_model.bin"), weights_only=True))
        self.model.eval()

    def _convert_inputs(self, inputs):
        data = inputs.get("data", {})
        # Keep only the selected keys that are actually present in the data
        valid_keys = [key for key in self.selected_keys if key in data]
        # The shortest column bounds the number of complete rows
        lengths = [len(data[key]) for key in valid_keys]
        if not lengths:
            return []
        num_rows = min(lengths)
        # Assemble the list of rows (selected keys only)
        return [
            {key: data[key][i] for key in valid_keys}
            for i in range(num_rows)
        ]
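
    # For illustration (hypothetical values): a column-oriented payload such as
    # {"data": {"sex": [2, 1], "has_photo": [1, 0], "has_mobile": [1, 1],
    #           "followers_count": [150, 3], "relation": [0, 4],
    #           "life_main": [6, 0], "people_main": [5, 2]}}
    # is converted into per-row dicts:
    # [{"sex": 2, "has_photo": 1, ...}, {"sex": 1, "has_photo": 0, ...}]
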
    def _normalize(self, flist):
        # Z-score normalization; the epsilon guards against zero variance
        return (np.asarray(flist, dtype=np.float32) - self.mean) / (self.std + 1e-8)

    def __call__(
        self, inputs: Dict[str, Dict[str, List[Union[str, float]]]]
    ) -> List[bool]:
        """
        Args:
            inputs (:obj:`dict`):
                a dictionary containing a key 'data' mapping to a dict in which
                the values represent each column.
        Return:
            A :obj:`list` of booleans: the classification output for each row.
        """
        rows = self._convert_inputs(inputs)
        outputs = []
        with torch.no_grad():
            for row in rows:
                features = [row["sex"], row["has_photo"], row["has_mobile"], row["followers_count"],
                            row["relation"], row["life_main"], row["people_main"]]
                batch = torch.tensor(self._normalize(features), dtype=torch.float32).unsqueeze(0).to(self.device)
                outputs.append(self.model(batch).squeeze())
        if not outputs:
            return []
        # Threshold each raw logit at zero to get a boolean prediction per row
        return [logit.item() > 0 for logit in torch.stack(outputs)]


class TabularTransformer(nn.Module):
    def __init__(self, input_dim, d_model, nhead, num_layers, dropout):
        super().__init__()
        # Project the raw feature vector into the model dimension
        self.input_proj = nn.Linear(input_dim, d_model)

        encoder_layers = TransformerEncoderLayer(
            d_model=d_model,
            nhead=nhead,
            dim_feedforward=d_model * 4,
            dropout=dropout,
            activation='gelu',
            batch_first=True
        )
        self.transformer = TransformerEncoder(encoder_layers, num_layers)

        # Classification head: LayerNorm followed by a single-logit projection
        self.head = nn.Sequential(
            nn.LayerNorm(d_model),
            nn.Linear(d_model, 1)
        )

        self._init_weights()

    def _init_weights(self):
        for module in self.modules():
            if isinstance(module, nn.Linear):
                nn.init.xavier_normal_(module.weight)
                if module.bias is not None:
                    nn.init.zeros_(module.bias)

    def forward(self, x):
        x = self.input_proj(x)
        # Each row is treated as a length-1 sequence for the encoder
        x = self.transformer(x.unsqueeze(1)).squeeze(1)
        return self.head(x)
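
A quick usage sketch of the pipeline above (the feature values are made up; train_mean.npy, train_std.npy and pytorch_model.bin must be present under path for this to run):

pipe = PreTrainedPipeline(path="./")
payload = {
    "data": {
        "sex": [2],
        "has_photo": [1],
        "has_mobile": [1],
        "followers_count": [150],
        "relation": [0],
        "life_main": [6],
        "people_main": [5],
    }
}
predictions = pipe(payload)  # one boolean per input row, e.g. [True]
print(predictions)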