import gradio as gr
import torch
import torch.nn as nn
import sentencepiece as spm
import math

# Define Transformer components (must match the architecture the checkpoint was saved with)
class MultiHeadAttention(nn.Module):
    def __init__(self, d_model, num_heads):
        super(MultiHeadAttention, self).__init__()
        assert d_model % num_heads == 0
        self.d_model = d_model
        self.num_heads = num_heads
        self.d_k = d_model // num_heads
        self.W_q = nn.Linear(d_model, d_model)
        self.W_k = nn.Linear(d_model, d_model)
        self.W_v = nn.Linear(d_model, d_model)
        self.W_o = nn.Linear(d_model, d_model)

    def scaled_dot_product_attention(self, Q, K, V, mask=None):
        attn_scores = torch.matmul(Q, K.transpose(-2, -1)) / math.sqrt(self.d_k)
        if mask is not None:
            attn_scores = attn_scores.masked_fill(mask == 0, -1e9)
        attn_probs = torch.softmax(attn_scores, dim=-1)
        output = torch.matmul(attn_probs, V)
        return output

    def split_heads(self, x):
        batch_size, seq_length, d_model = x.size()
        return x.view(batch_size, seq_length, self.num_heads, self.d_k).transpose(1, 2)

    def combine_heads(self, x):
        batch_size, _, seq_length, d_k = x.size()
        return x.transpose(1, 2).contiguous().view(batch_size, seq_length, self.d_model)

    def forward(self, Q, K, V, mask=None):
        Q = self.split_heads(self.W_q(Q))
        K = self.split_heads(self.W_k(K))
        V = self.split_heads(self.W_v(V))
        attn_output = self.scaled_dot_product_attention(Q, K, V, mask)
        output = self.W_o(self.combine_heads(attn_output))
        return output
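# Shape walk-through (illustrative, derived from the code above): with batch size B,
# sequence length L, width D = d_model and H = num_heads, split_heads maps
# (B, L, D) -> (B, H, L, D // H), the attention weight matrix is (B, H, L, L),
# and combine_heads restores (B, L, D) before the output projection W_o.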
class PositionWiseFeedForward(nn.Module):
    def __init__(self, d_model, d_ff):
        super(PositionWiseFeedForward, self).__init__()
        self.fc1 = nn.Linear(d_model, d_ff)
        self.fc2 = nn.Linear(d_ff, d_model)
        self.relu = nn.ReLU()

    def forward(self, x):
        return self.fc2(self.relu(self.fc1(x)))
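# This is the standard position-wise feed-forward block,
# FFN(x) = W2 * ReLU(W1 * x + b1) + b2, applied independently at every position.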
class PositionalEncoding(nn.Module):
    def __init__(self, d_model, max_seq_length):
        super(PositionalEncoding, self).__init__()
        pe = torch.zeros(max_seq_length, d_model)
        position = torch.arange(0, max_seq_length, dtype=torch.float).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2).float() * -(math.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        self.register_buffer('pe', pe.unsqueeze(0))

    def forward(self, x):
        return x + self.pe[:, :x.size(1)]
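# Sinusoidal encoding as in "Attention Is All You Need":
# PE(pos, 2i) = sin(pos / 10000^(2i/d_model)), PE(pos, 2i+1) = cos(pos / 10000^(2i/d_model)).
# register_buffer keeps `pe` out of the optimizer while still moving it with the
# module across devices and saving it in the state dict.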
class EncoderLayer(nn.Module):
    def __init__(self, d_model, num_heads, d_ff, dropout):
        super(EncoderLayer, self).__init__()
        self.self_attn = MultiHeadAttention(d_model, num_heads)
        self.feed_forward = PositionWiseFeedForward(d_model, d_ff)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, mask):
        attn_output = self.self_attn(x, x, x, mask)
        x = self.norm1(x + self.dropout(attn_output))
        ff_output = self.feed_forward(x)
        x = self.norm2(x + self.dropout(ff_output))
        return x
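# Post-norm residual wiring: each sublayer computes x = LayerNorm(x + Dropout(sublayer(x))),
# i.e. the original Transformer layout rather than the pre-norm variant common in newer models.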
class DecoderLayer(nn.Module):
    def __init__(self, d_model, num_heads, d_ff, dropout):
        super(DecoderLayer, self).__init__()
        self.self_attn = MultiHeadAttention(d_model, num_heads)
        self.cross_attn = MultiHeadAttention(d_model, num_heads)
        self.feed_forward = PositionWiseFeedForward(d_model, d_ff)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.norm3 = nn.LayerNorm(d_model)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, enc_output, src_mask, tgt_mask):
        attn_output = self.self_attn(x, x, x, tgt_mask)
        x = self.norm1(x + self.dropout(attn_output))
        attn_output = self.cross_attn(x, enc_output, enc_output, src_mask)
        x = self.norm2(x + self.dropout(attn_output))
        ff_output = self.feed_forward(x)
        x = self.norm3(x + self.dropout(ff_output))
        return x
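# Decoder sublayer order: masked self-attention over the generated prefix (tgt_mask),
# then cross-attention where queries come from the decoder and keys/values from
# enc_output (src_mask), then the feed-forward block.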
class Transformer(nn.Module):
    def __init__(self, src_vocab_size, tgt_vocab_size, d_model, num_heads, num_layers, d_ff, max_seq_length, dropout):
        super(Transformer, self).__init__()
        self.encoder_embedding = nn.Embedding(src_vocab_size, d_model)
        self.decoder_embedding = nn.Embedding(tgt_vocab_size, d_model)
        self.positional_encoding = PositionalEncoding(d_model, max_seq_length)
        self.encoder_layers = nn.ModuleList([EncoderLayer(d_model, num_heads, d_ff, dropout) for _ in range(num_layers)])
        self.decoder_layers = nn.ModuleList([DecoderLayer(d_model, num_heads, d_ff, dropout) for _ in range(num_layers)])
        self.fc = nn.Linear(d_model, tgt_vocab_size)
        self.dropout = nn.Dropout(dropout)

    def generate_mask(self, src, tgt):
        # Padding masks (pad id assumed to be 0) plus a causal "no peek" mask for the decoder.
        src_mask = (src != 0).unsqueeze(1).unsqueeze(2)  # (B, 1, 1, src_len)
        tgt_mask = (tgt != 0).unsqueeze(1).unsqueeze(3)  # (B, 1, tgt_len, 1)
        seq_length = tgt.size(1)
        # Build the causal mask on the same device as tgt so GPU inference does not fail.
        nopeak_mask = (1 - torch.triu(torch.ones(1, seq_length, seq_length, device=tgt.device), diagonal=1)).bool()
        tgt_mask = tgt_mask & nopeak_mask  # broadcasts to (B, 1, tgt_len, tgt_len)
        return src_mask, tgt_mask

    def forward(self, src, tgt):
        src_mask, tgt_mask = self.generate_mask(src, tgt)
        src_embedded = self.dropout(self.positional_encoding(self.encoder_embedding(src)))
        tgt_embedded = self.dropout(self.positional_encoding(self.decoder_embedding(tgt)))
        enc_output = src_embedded
        for enc_layer in self.encoder_layers:
            enc_output = enc_layer(enc_output, src_mask)
        dec_output = tgt_embedded
        for dec_layer in self.decoder_layers:
            dec_output = dec_layer(dec_output, enc_output, src_mask, tgt_mask)
        output = self.fc(dec_output)
        return output
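# Quick shape sanity check (illustrative; these hyperparameters are assumptions,
# not necessarily what the checkpoint was trained with):
#     model = Transformer(src_vocab_size=8000, tgt_vocab_size=8000, d_model=512,
#                         num_heads=8, num_layers=6, d_ff=2048,
#                         max_seq_length=512, dropout=0.1)
#     src = torch.randint(1, 8000, (2, 10))   # ids in [1, 7999]; 0 is reserved for padding
#     tgt = torch.randint(1, 8000, (2, 7))
#     model(src, tgt).shape  # torch.Size([2, 7, 8000]): per-position logits over the target vocab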
# Set device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load tokenizers
sp_pseudo = spm.SentencePieceProcessor(model_file="pseudo.model")  # For decoding pseudocode (target)
sp_code = spm.SentencePieceProcessor(model_file="code.model")      # For encoding C++ (source)

# Load the full saved model (architecture + weights)
model_path = "transformer_cpp_to_pseudo.pth"
model = torch.load(model_path, map_location=device, weights_only=False)
model.eval()
model = model.to(device)
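# Note: weights_only=False unpickles the entire model object, so the class
# definitions above must match, in name and structure, the ones used when the
# checkpoint was saved; a state_dict-style checkpoint would only require
# matching parameter shapes.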
def generate_pseudocode(cpp_code, max_len):
    """Generate pseudocode from C++ code with streaming output (greedy decoding)."""
    model.eval()
    src = torch.tensor([sp_code.encode_as_ids(cpp_code)], dtype=torch.long, device=device)  # Tokenize C++ code
    tgt = torch.tensor([[2]], dtype=torch.long, device=device)  # <bos_id>=2
    generated_tokens = [2]  # Start with <START>
    response = ""
    with torch.no_grad():
        for _ in range(max_len):
            output = model(src, tgt)
            # Greedy step: argmax over the vocabulary at the last decoded position.
            next_token = output[:, -1, :].argmax(-1).item()
            generated_tokens.append(next_token)
            tgt = torch.cat([tgt, torch.tensor([[next_token]], device=device)], dim=1)
            response = sp_pseudo.decode_ids(generated_tokens)  # Decode to pseudocode
            yield response  # Yield partial output
            if next_token == 3:  # <END>=3 (adjust if your EOS ID differs)
                break
    yield response  # Final output
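# Usage sketch (illustrative): the generator yields a growing string, so a plain
# loop prints progressively longer partial decodings until <END> or max_len:
#     for partial in generate_pseudocode("int x = 5;", max_len=50):
#         print(partial)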
def respond(message, history, max_tokens):
    """Wrapper for the Gradio click handler; `history` is accepted but unused."""
    for response in generate_pseudocode(message, max_tokens):
        yield response
# Gradio UI setup with Blocks
with gr.Blocks(title="C++ to Pseudocode Transformer") as demo:
    gr.Markdown("## C++ to Pseudocode Converter")
    gr.Markdown("Enter C++ code below and press Submit to generate pseudocode.")
    cpp_input = gr.Textbox(
        label="C++ Code",
        placeholder="e.g., 'int x = 5; for(int i=0; i<x; i++) cout << i;'",
        lines=5,
    )
    submit_btn = gr.Button("Submit", variant="primary")
    pseudocode_output = gr.Textbox(
        label="Generated Pseudocode",
        lines=5,
    )
    # Hidden inputs: an (unused) history state and a fixed max-token budget.
    history_state = gr.State(value=[])
    max_tokens = gr.Slider(minimum=10, maximum=1000, value=50, step=1, visible=False)
    submit_btn.click(
        fn=respond,
        inputs=[cpp_input, history_state, max_tokens],
        outputs=pseudocode_output,
    )

if __name__ == "__main__":
    demo.launch()
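# To run locally, install gradio, torch, and sentencepiece, place pseudo.model,
# code.model, and transformer_cpp_to_pseudo.pth next to this script, and start it
# with `python app.py` (the filename app.py is the usual Hugging Face Spaces
# convention, an assumption here).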