Upload 4 files
README.md
CHANGED
@@ -1,12 +1,36 @@
# My neural network (~10M parameters)

This app implements a transformer model with ~10M parameters for text generation. The model is built with PyTorch and exposes a Gradio interface.

## Installation

Make sure all dependencies from requirements.txt are installed:

    torch==2.0.1
    gradio==4.44.0
    torchtext==0.15.2

The app needs a model.pt weights file to run. To train the model:

1. Run train.py locally or on a cloud platform (e.g. Google Colab) with your dataset.
2. Copy the resulting model.pt into the repository root.
## Usage

Run the app through Hugging Face Spaces or locally:

    python app.py

Enter a starting text in the Gradio interface and the model will continue it.
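For a quick check outside the web UI, you can also call the generation function directly from Python (a minimal sketch; importing app builds the model and prints a warning if model.pt is missing):

    # Minimal smoke test: call the app's generation function directly.
    from app import generate_text

    print(generate_text("hello world", max_length=20))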
## Training

1. Replace sample_data in train.py with your own text dataset (e.g. WikiText); see the sketch after this list.
2. Run train.py on a machine with a GPU to speed up training.
3. After training, upload model.pt to the repository.
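For example, pulling WikiText-2 through torchtext might look like this (an untested sketch; torchtext 0.15 datasets additionally require the torchdata package):

    # Sketch: replace sample_data in train.py with WikiText-2 lines.
    from torchtext.datasets import WikiText2

    train_iter = WikiText2(split='train')  # iterable of raw text lines
    sample_data = [line for line in train_iter if line.strip()]  # drop blank lines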
## Notes

- The model is sized to run on the free Hugging Face Spaces hardware. If you run into memory problems, reduce EMBED_SIZE or NUM_LAYERS in app.py and train.py.
- To improve generation quality, use a larger dataset and more training epochs.
app.py
CHANGED
@@ -1,64 +1,110 @@
- [removed: the previous demo app, a Gradio chat interface that streamed completions via message.choices[0].delta.content, with sliders for max new tokens, temperature, and top-p (nucleus sampling)]
import torch
import torch.nn as nn
import gradio as gr
from torchtext.data.utils import get_tokenizer
from torchtext.vocab import build_vocab_from_iterator
import math

# Model hyperparameters
VOCAB_SIZE = 10000  # vocabulary size
EMBED_SIZE = 256    # embedding dimension
NUM_HEADS = 8       # number of attention heads
NUM_LAYERS = 6      # number of transformer layers
FFN_DIM = 512       # hidden size of the feed-forward block
DROPOUT = 0.1

# Model definition: a decoder-only language model.
# Note: nn.TransformerDecoder requires an encoder `memory` tensor (the
# original code passed memory=None, which crashes at runtime), so the
# decoder-only stack is built from nn.TransformerEncoder layers
# (self-attention only) plus a causal mask instead.
class TransformerModel(nn.Module):
    def __init__(self, vocab_size, embed_size, num_heads, num_layers, ffn_dim, dropout):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embed_size)
        self.pos_encoder = PositionalEncoding(embed_size, dropout)
        layer = nn.TransformerEncoderLayer(embed_size, num_heads, ffn_dim, dropout, batch_first=True)
        self.transformer = nn.TransformerEncoder(layer, num_layers)
        self.fc_out = nn.Linear(embed_size, vocab_size)
        self.embed_size = embed_size

    def forward(self, src, src_mask=None):
        if src_mask is None:
            # Causal mask: each position may attend only to earlier positions.
            src_mask = nn.Transformer.generate_square_subsequent_mask(src.size(1)).to(src.device)
        src = self.embedding(src) * math.sqrt(self.embed_size)
        src = self.pos_encoder(src)
        output = self.transformer(src, mask=src_mask)
        return self.fc_out(output)

# Sinusoidal positional encoding (expects batch-first input: batch, seq, embed)
class PositionalEncoding(nn.Module):
    def __init__(self, embed_size, dropout, max_len=5000):
        super().__init__()
        self.dropout = nn.Dropout(p=dropout)
        pe = torch.zeros(max_len, embed_size)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, embed_size, 2).float() * (-math.log(10000.0) / embed_size))
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        pe = pe.unsqueeze(0)
        self.register_buffer('pe', pe)

    def forward(self, x):
        x = x + self.pe[:, :x.size(1)]
        return self.dropout(x)

# Parameter count
def count_parameters(model):
    return sum(p.numel() for p in model.parameters() if p.requires_grad)

# Tokenizer and vocabulary
tokenizer = get_tokenizer('basic_english')
def yield_tokens(data_iter):
    for text in data_iter:
        yield tokenizer(text)

# Sample data (replace with your own dataset; must match the data used in
# train.py so that token indices line up with the trained weights)
sample_data = ["Hello world", "This is a test", "Build a neural network"] * 1000
vocab = build_vocab_from_iterator(yield_tokens(sample_data), specials=['<unk>', '<pad>'])
vocab.set_default_index(vocab['<unk>'])

# Model initialization
model = TransformerModel(
    vocab_size=VOCAB_SIZE,
    embed_size=EMBED_SIZE,
    num_heads=NUM_HEADS,
    num_layers=NUM_LAYERS,
    ffn_dim=FFN_DIM,
    dropout=DROPOUT
)
print(f"Model parameter count: {count_parameters(model)}")

# Load trained weights (if present)
try:
    model.load_state_dict(torch.load("model.pt", map_location="cpu"))
except FileNotFoundError:
    print("Model weights not found. Run train.py to train the model.")

# Text generation: greedy decoding, one token at a time
def generate_text(prompt, max_length=50):
    model.eval()
    tokens = tokenizer(prompt)
    indices = [vocab[token] for token in tokens]
    if not indices:
        return prompt
    src = torch.tensor(indices, dtype=torch.long).unsqueeze(0)
    for _ in range(max_length):
        with torch.no_grad():
            output = model(src)
        # Restrict argmax to ids the vocabulary actually contains
        # (VOCAB_SIZE may be larger than len(vocab))
        next_token = output[0, -1, :len(vocab)].argmax(-1).item()
        src = torch.cat([src, torch.tensor([[next_token]], dtype=torch.long)], dim=-1)
        if next_token == vocab['<pad>']:
            break
    generated = [vocab.get_itos()[idx] for idx in src.squeeze(0).tolist()]
    return ' '.join(generated)

# Gradio interface
iface = gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(lines=2, placeholder="Enter the beginning of a text..."),
    outputs="text",
    title="My neural network (~10M parameters)",
    description="Enter a text and the model will continue it."
)

# Launch the interface
if __name__ == "__main__":
    iface.launch()
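As a sanity check on the "~10M parameters" claim: with this configuration the embedding matrix and output head dominate, and a rough count (sketch below, assuming the standard nn.TransformerEncoderLayer parameter layout) lands near 8.3M, consistent with what count_parameters prints.

    # Rough parameter count for VOCAB_SIZE=10000, EMBED_SIZE=256, FFN_DIM=512, NUM_LAYERS=6
    V, E, F, L = 10000, 256, 512, 6
    embedding = V * E                       # 2,560,000
    head = E * V + V                        # 2,570,000 (weights + bias)
    per_layer = (4 * E * E + 4 * E          # self-attention in/out projections
                 + 2 * E * F + F + E        # feed-forward linears
                 + 4 * E)                   # two LayerNorms (weight + bias)
    print(f"{embedding + head + L * per_layer:,}")  # 8,292,624, i.e. ~8.3M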
requirements.txt
CHANGED
@@ -1 +1,3 @@
torch==2.0.1
gradio==4.44.0
torchtext==0.15.2
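These pins should be mutually compatible: torchtext 0.15.x is the release series matched to torch 2.0.x.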
train.py
ADDED
@@ -0,0 +1,96 @@
import torch
import torch.nn as nn
from torchtext.data.utils import get_tokenizer
from torchtext.vocab import build_vocab_from_iterator
import math

# Model hyperparameters (must match app.py)
VOCAB_SIZE = 10000
EMBED_SIZE = 256
NUM_HEADS = 8
NUM_LAYERS = 6
FFN_DIM = 512
DROPOUT = 0.1

# Model definition (copied from app.py so this script is self-contained)
class TransformerModel(nn.Module):
    def __init__(self, vocab_size, embed_size, num_heads, num_layers, ffn_dim, dropout):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embed_size)
        self.pos_encoder = PositionalEncoding(embed_size, dropout)
        layer = nn.TransformerEncoderLayer(embed_size, num_heads, ffn_dim, dropout, batch_first=True)
        self.transformer = nn.TransformerEncoder(layer, num_layers)
        self.fc_out = nn.Linear(embed_size, vocab_size)
        self.embed_size = embed_size

    def forward(self, src, src_mask=None):
        if src_mask is None:
            # Causal mask so a position cannot attend to its training target.
            src_mask = nn.Transformer.generate_square_subsequent_mask(src.size(1)).to(src.device)
        src = self.embedding(src) * math.sqrt(self.embed_size)
        src = self.pos_encoder(src)
        output = self.transformer(src, mask=src_mask)
        return self.fc_out(output)

class PositionalEncoding(nn.Module):
    def __init__(self, embed_size, dropout, max_len=5000):
        super().__init__()
        self.dropout = nn.Dropout(p=dropout)
        pe = torch.zeros(max_len, embed_size)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, embed_size, 2).float() * (-math.log(10000.0) / embed_size))
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        pe = pe.unsqueeze(0)
        self.register_buffer('pe', pe)

    def forward(self, x):
        x = x + self.pe[:, :x.size(1)]
        return self.dropout(x)

# Tokenizer and vocabulary
tokenizer = get_tokenizer('basic_english')
def yield_tokens(data_iter):
    for text in data_iter:
        yield tokenizer(text)

# Sample data (replace with your own dataset)
sample_data = ["Hello world", "This is a test", "Build a neural network"] * 1000
vocab = build_vocab_from_iterator(yield_tokens(sample_data), specials=['<unk>', '<pad>'])
vocab.set_default_index(vocab['<unk>'])

# Model initialization
model = TransformerModel(
    vocab_size=VOCAB_SIZE,
    embed_size=EMBED_SIZE,
    num_heads=NUM_HEADS,
    num_layers=NUM_LAYERS,
    ffn_dim=FFN_DIM,
    dropout=DROPOUT
)

# Training loop: next-token prediction, one sequence per optimizer step
def train_model(model, data, epochs=5, device='cpu'):
    model = model.to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
    criterion = nn.CrossEntropyLoss()
    model.train()
    for epoch in range(epochs):
        total_loss = 0
        for text in data:
            tokens = tokenizer(text)
            indices = [vocab[token] for token in tokens][:50]  # cap sequence length
            if len(indices) < 2:
                continue
            # Shift by one: predict token t+1 from tokens up to t
            src = torch.tensor(indices[:-1], dtype=torch.long).unsqueeze(0).to(device)
            tgt = torch.tensor(indices[1:], dtype=torch.long).unsqueeze(0).to(device)
            optimizer.zero_grad()
            output = model(src)  # the causal mask is applied inside forward()
            loss = criterion(output.view(-1, VOCAB_SIZE), tgt.view(-1))
            loss.backward()
            optimizer.step()
            total_loss += loss.item()
        print(f"Epoch {epoch+1}, Loss: {total_loss / len(data)}")
    torch.save(model.state_dict(), "model.pt")

# Run training
if __name__ == "__main__":
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    train_model(model, sample_data, epochs=5, device=device)
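A hypothetical post-training check (not part of the upload): inspect the saved state dict before copying model.pt into the repo.

    # Verify the checkpoint loads and report its size.
    import torch

    state = torch.load("model.pt", map_location="cpu")
    print(f"{len(state)} tensors, {sum(v.numel() for v in state.values()):,} parameters saved")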