Spaces: Build error
Update app.py
Browse files

app.py CHANGED
@@ -2,6 +2,135 @@ import gradio as gr
 import torch
 import torch.nn as nn
 import sentencepiece as spm
+import math
+
+# Define Transformer components
+class MultiHeadAttention(nn.Module):
+    def __init__(self, d_model, num_heads):
+        super(MultiHeadAttention, self).__init__()
+        assert d_model % num_heads == 0
+        self.d_model = d_model
+        self.num_heads = num_heads
+        self.d_k = d_model // num_heads
+        self.W_q = nn.Linear(d_model, d_model)
+        self.W_k = nn.Linear(d_model, d_model)
+        self.W_v = nn.Linear(d_model, d_model)
+        self.W_o = nn.Linear(d_model, d_model)
+
+    def scaled_dot_product_attention(self, Q, K, V, mask=None):
+        attn_scores = torch.matmul(Q, K.transpose(-2, -1)) / math.sqrt(self.d_k)
+        if mask is not None:
+            attn_scores = attn_scores.masked_fill(mask == 0, -1e9)
+        attn_probs = torch.softmax(attn_scores, dim=-1)
+        output = torch.matmul(attn_probs, V)
+        return output
+
+    def split_heads(self, x):
+        batch_size, seq_length, d_model = x.size()
+        return x.view(batch_size, seq_length, self.num_heads, self.d_k).transpose(1, 2)
+
+    def combine_heads(self, x):
+        batch_size, _, seq_length, d_k = x.size()
+        return x.transpose(1, 2).contiguous().view(batch_size, seq_length, self.d_model)
+
+    def forward(self, Q, K, V, mask=None):
+        Q = self.split_heads(self.W_q(Q))
+        K = self.split_heads(self.W_k(K))
+        V = self.split_heads(self.W_v(V))
+        attn_output = self.scaled_dot_product_attention(Q, K, V, mask)
+        output = self.W_o(self.combine_heads(attn_output))
+        return output
+
+class PositionWiseFeedForward(nn.Module):
+    def __init__(self, d_model, d_ff):
+        super(PositionWiseFeedForward, self).__init__()
+        self.fc1 = nn.Linear(d_model, d_ff)
+        self.fc2 = nn.Linear(d_ff, d_model)
+        self.relu = nn.ReLU()
+
+    def forward(self, x):
+        return self.fc2(self.relu(self.fc1(x)))
+
+class PositionalEncoding(nn.Module):
+    def __init__(self, d_model, max_seq_length):
+        super(PositionalEncoding, self).__init__()
+        pe = torch.zeros(max_seq_length, d_model)
+        position = torch.arange(0, max_seq_length, dtype=torch.float).unsqueeze(1)
+        div_term = torch.exp(torch.arange(0, d_model, 2).float() * -(math.log(10000.0) / d_model))
+        pe[:, 0::2] = torch.sin(position * div_term)
+        pe[:, 1::2] = torch.cos(position * div_term)
+        self.register_buffer('pe', pe.unsqueeze(0))
+
+    def forward(self, x):
+        return x + self.pe[:, :x.size(1)]
+
+class EncoderLayer(nn.Module):
+    def __init__(self, d_model, num_heads, d_ff, dropout):
+        super(EncoderLayer, self).__init__()
+        self.self_attn = MultiHeadAttention(d_model, num_heads)
+        self.feed_forward = PositionWiseFeedForward(d_model, d_ff)
+        self.norm1 = nn.LayerNorm(d_model)
+        self.norm2 = nn.LayerNorm(d_model)
+        self.dropout = nn.Dropout(dropout)
+
+    def forward(self, x, mask):
+        attn_output = self.self_attn(x, x, x, mask)
+        x = self.norm1(x + self.dropout(attn_output))
+        ff_output = self.feed_forward(x)
+        x = self.norm2(x + self.dropout(ff_output))
+        return x
+
+class DecoderLayer(nn.Module):
+    def __init__(self, d_model, num_heads, d_ff, dropout):
+        super(DecoderLayer, self).__init__()
+        self.self_attn = MultiHeadAttention(d_model, num_heads)
+        self.cross_attn = MultiHeadAttention(d_model, num_heads)
+        self.feed_forward = PositionWiseFeedForward(d_model, d_ff)
+        self.norm1 = nn.LayerNorm(d_model)
+        self.norm2 = nn.LayerNorm(d_model)
+        self.norm3 = nn.LayerNorm(d_model)
+        self.dropout = nn.Dropout(dropout)
+
+    def forward(self, x, enc_output, src_mask, tgt_mask):
+        attn_output = self.self_attn(x, x, x, tgt_mask)
+        x = self.norm1(x + self.dropout(attn_output))
+        attn_output = self.cross_attn(x, enc_output, enc_output, src_mask)
+        x = self.norm2(x + self.dropout(attn_output))
+        ff_output = self.feed_forward(x)
+        x = self.norm3(x + self.dropout(ff_output))
+        return x
+
+class Transformer(nn.Module):
+    def __init__(self, src_vocab_size, tgt_vocab_size, d_model, num_heads, num_layers, d_ff, max_seq_length, dropout):
+        super(Transformer, self).__init__()
+        self.encoder_embedding = nn.Embedding(src_vocab_size, d_model)
+        self.decoder_embedding = nn.Embedding(tgt_vocab_size, d_model)
+        self.positional_encoding = PositionalEncoding(d_model, max_seq_length)
+        self.encoder_layers = nn.ModuleList([EncoderLayer(d_model, num_heads, d_ff, dropout) for _ in range(num_layers)])
+        self.decoder_layers = nn.ModuleList([DecoderLayer(d_model, num_heads, d_ff, dropout) for _ in range(num_layers)])
+        self.fc = nn.Linear(d_model, tgt_vocab_size)
+        self.dropout = nn.Dropout(dropout)
+
+    def generate_mask(self, src, tgt):
+        src_mask = (src != 0).unsqueeze(1).unsqueeze(2)
+        tgt_mask = (tgt != 0).unsqueeze(1).unsqueeze(3)
+        seq_length = tgt.size(1)
+        nopeak_mask = (1 - torch.triu(torch.ones(1, seq_length, seq_length), diagonal=1)).bool()
+        tgt_mask = tgt_mask & nopeak_mask
+        return src_mask, tgt_mask
+
+    def forward(self, src, tgt):
+        src_mask, tgt_mask = self.generate_mask(src, tgt)
+        src_embedded = self.dropout(self.positional_encoding(self.encoder_embedding(src)))
+        tgt_embedded = self.dropout(self.positional_encoding(self.decoder_embedding(tgt)))
+        enc_output = src_embedded
+        for enc_layer in self.encoder_layers:
+            enc_output = enc_layer(enc_output, src_mask)
+        dec_output = tgt_embedded
+        for dec_layer in self.decoder_layers:
+            dec_output = dec_layer(dec_output, enc_output, src_mask, tgt_mask)
+        output = self.fc(dec_output)
+        return output
 
 # Set device
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
@@ -16,7 +145,6 @@ model = torch.load(model_path, map_location=device, weights_only=False)
 model.eval()
 model = model.to(device)
 
-
 def generate_code(pseudocode, max_len):
     """Generate C++ code from pseudocode with streaming output."""
     model.eval()
@@ -33,13 +161,12 @@ def generate_code(pseudocode, max_len):
         tgt = torch.cat([tgt, torch.tensor([[next_token]], device=device)], dim=1)
         response = sp_code.decode_ids(generated_tokens)
         yield response  # Yield partial output
-        if next_token == 5:  # <END>
+        if next_token == 5:  # <END>=5
            break
     yield response  # Final output
 
 def respond(message, history, max_tokens):
     """Wrapper for Gradio interface."""
-    # Ignore history since it's one-shot generation
     for response in generate_code(message, max_tokens):
         yield response
 
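Note on the fix: the second hunk header shows the model being restored with torch.load(model_path, map_location=device, weights_only=False), which unpickles an entire nn.Module object. Unpickling only succeeds if the classes the object was built from (Transformer, EncoderLayer, MultiHeadAttention, and so on) are defined in the loading module, which for a single-file Space is app.py itself; a missing definition typically surfaces as an AttributeError during unpickling at startup, a plausible source of this Space's build error. A minimal sketch of the load path; model_path and device come from app.py, while the commented state-dict alternative and its hyperparameter values are illustrative, not what this Space does:

# With the class definitions above in scope, unpickling the full model works:
model = torch.load(model_path, map_location=device, weights_only=False)
model.eval()

# Illustrative alternative (hypothetical values): save and load only the
# weights, then rebuild the architecture explicitly, which avoids pickle's
# dependence on the defining module:
#   torch.save(model.state_dict(), "weights.pt")
#   model = Transformer(src_vocab_size=8000, tgt_vocab_size=8000, d_model=512,
#                       num_heads=8, num_layers=6, d_ff=2048,
#                       max_seq_length=512, dropout=0.1)
#   model.load_state_dict(torch.load("weights.pt", map_location=device))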
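For reference, generate_mask in the added Transformer combines a padding mask (token id 0 is treated as padding) with a causal no-peek mask, and scaled_dot_product_attention fills masked positions with -1e9 before the softmax so they receive near-zero attention. A small self-contained sketch of the no-peek construction, using an illustrative length of 4:

import torch

seq_length = 4  # toy value for illustration
# Same construction as in Transformer.generate_mask: ones above the main
# diagonal mark future positions; subtracting from 1 flips them off, so the
# mask is True exactly where a position may attend (itself and earlier tokens).
nopeak_mask = (1 - torch.triu(torch.ones(1, seq_length, seq_length), diagonal=1)).bool()
print(nopeak_mask[0].int())
# tensor([[1, 0, 0, 0],
#         [1, 1, 0, 0],
#         [1, 1, 1, 0],
#         [1, 1, 1, 1]])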
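The respond(message, history, max_tokens) signature matches what gr.ChatInterface passes to its function when given one additional input, and yielding partial responses streams them into the chat window. The actual interface wiring sits outside these hunks, so the following is an assumed sketch (slider bounds and labels are hypothetical), not the Space's real code:

import gradio as gr

# Hypothetical wiring: ChatInterface calls respond(message, history, max_tokens)
# and streams each yielded partial response to the UI.
demo = gr.ChatInterface(
    respond,
    additional_inputs=[gr.Slider(1, 512, value=128, step=1, label="Max tokens")],
)
demo.launch()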