"""Hugging Face Space app: pseudo-code to code generation with a fine-tuned GPT-2."""

import os

import torch
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

# Model configuration.
# Loading from the current directory since the model files
# (model.safetensors + tokenizer files) are uploaded to the Space root.
MODEL_NAME = "."
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

# Module-level cache so the model is only loaded once per process.
_model = None
_tokenizer = None


def load_model():
    """Load the model and tokenizer with simple caching.

    Returns:
        Tuple of ``(model, tokenizer)``; repeat calls return the cached pair.

    Raises:
        Exception: re-raises whatever ``from_pretrained`` failed with, after
        printing troubleshooting hints.
    """
    global _model, _tokenizer

    # Fast path: return cached model if already loaded.
    if _model is not None and _tokenizer is not None:
        return _model, _tokenizer

    print(f"Loading model from: {MODEL_NAME}")
    print(f"Using device: {DEVICE}")

    try:
        tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)

        # GPT-2 style tokenizers ship without a pad token; reuse EOS so
        # padding during batched tokenization works.
        if tokenizer.pad_token is None:
            tokenizer.pad_token = tokenizer.eos_token

        # fp16 + device_map="auto" on GPU; plain fp32 on CPU.
        model = AutoModelForCausalLM.from_pretrained(
            MODEL_NAME,
            torch_dtype=torch.float16 if DEVICE == "cuda" else torch.float32,
            device_map="auto" if DEVICE == "cuda" else None,
            trust_remote_code=True,
        )
        if DEVICE == "cpu":
            model = model.to(DEVICE)

        print("āœ… Model and tokenizer loaded successfully!")

        # Cache the loaded model and tokenizer.
        _model = model
        _tokenizer = tokenizer
        return model, tokenizer

    except Exception as e:
        print(f"āŒ Error loading model: {e}")
        print("\nšŸ”§ Troubleshooting:")
        print("1. If using HF repository: Make sure MODEL_NAME is 'username/model-name'")
        print("2. If using local files: Make sure model files are in the correct folder")
        print("3. For private repos: Add authentication token")
        # Bare `raise` re-raises with the original traceback intact
        # (was `raise e`, which obscures the raise site).
        raise


# Initialize model and tokenizer at import time so load failures surface in
# the Space logs immediately instead of on the first request.
try:
    model, tokenizer = load_model()
except Exception as e:
    print(f"Failed to load model: {e}")
    # Dummy objects so the UI can still start and report the problem.
    model, tokenizer = None, None


def generate_code(pseudocode, indent=1, line=1, temperature=0.7, top_p=0.9, max_length=128):
    """
    Generate code from pseudo-code with line and indent information.

    Args:
        pseudocode: Input pseudo-code string.
        indent: Indentation level (1-10).
        line: Line number (1-100).
        temperature: Sampling temperature (0.1-2.0).
        top_p: Nucleus sampling parameter (0.1-1.0).
        max_length: Maximum number of new tokens to generate (50-512).

    Returns:
        Generated code string, or a user-facing error message starting with "āŒ".
    """
    try:
        # The model failed to load at startup: explain instead of crashing.
        if model is None or tokenizer is None:
            return """āŒ Model not loaded. Please check:
1. MODEL_NAME in app.py - should be either:
   - Your HF repository: "username/model-name"
   - Local path: "./model" (if files uploaded to Space)
2. If using HF repository, make sure it exists and is public
3. If using local files, ensure model files are in correct folder

Current MODEL_NAME: """ + MODEL_NAME

        # Validate inputs.
        if not pseudocode.strip():
            return "āŒ Error: Please enter some pseudocode."

        # Format input with line and indent information (matches training format).
        prompt = f"Pseudocode: {pseudocode.strip()} | Indent: {indent} | Line: {line}\nCode:"

        inputs = tokenizer(prompt, return_tensors='pt', padding=True, truncation=True, max_length=256)
        inputs = {k: v.to(DEVICE) for k, v in inputs.items()}

        # Generate with the model.
        model.eval()
        with torch.no_grad():
            outputs = model.generate(
                **inputs,
                max_new_tokens=max_length,
                temperature=max(0.1, temperature),  # Ensure minimum temperature
                top_p=max(0.1, top_p),              # Ensure minimum top_p
                do_sample=True,
                pad_token_id=tokenizer.eos_token_id,
                eos_token_id=tokenizer.eos_token_id,
                num_return_sequences=1,
                repetition_penalty=1.1,
                no_repeat_ngram_size=2,
            )

        generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)

        # Extract only the code part after the "Code:" marker (the model
        # echoes the prompt before its continuation).
        if "Code:" in generated_text:
            code = generated_text.split("Code:")[-1].strip()
        else:
            code = generated_text.strip()

        # Defensive cleanup in case the prompt survived extraction.
        if code.startswith(prompt):
            code = code[len(prompt):].strip()

        return code if code else "āŒ No code generated. Try adjusting the parameters."

    except Exception as e:
        return f"āŒ Error generating code: {str(e)}"
def create_examples():
    """Return example rows for gr.Examples.

    Each row is [pseudocode, indent, line, temperature, top_p, max_length],
    matching the input-component order wired into the interface.
    """
    return [
        ["create string s", 1, 1, 0.7, 0.9, 100],
        ["read input from user", 1, 2, 0.7, 0.9, 100],
        ["if s is empty", 1, 3, 0.7, 0.9, 100],
        ["print hello world", 2, 4, 0.7, 0.9, 100],
        ["for i from 0 to n", 1, 5, 0.7, 0.9, 100],
        ["declare integer array", 1, 1, 0.5, 0.9, 80],
        ["while condition is true", 2, 10, 0.8, 0.95, 120],
    ]


# Create Gradio interface.
with gr.Blocks(
    theme=gr.themes.Soft(),
    title="šŸ Pseudo-Code to Code Generator",
    css="""
    .gradio-container { max-width: 1200px; margin: auto; }
    .header { text-align: center; margin-bottom: 30px; }
    .info-box { background-color: #f0f8ff; padding: 15px; border-radius: 10px; margin: 10px 0; }
    """,
) as demo:
    # Header.
    # NOTE(review): the HTML tags in these literals were lost in a formatting
    # mangle; markup reconstructed to match the .header/.info-box CSS above —
    # confirm against the deployed Space.
    gr.HTML("""
    <div class="header">
        <h1>šŸ Pseudo-Code to Code Generator</h1>
        <p>Convert natural language pseudo-code to executable code using fine-tuned GPT-2</p>
    </div>
    """)

    # Info box.
    gr.HTML("""
    <div class="info-box">
        <b>šŸ“‹ How to use:</b>
        <ol>
            <li><b>Enter pseudocode:</b> Describe what you want the code to do in natural language</li>
            <li><b>Set context:</b> Adjust indent level and line number for better structure</li>
            <li><b>Tune generation:</b> Modify temperature and top_p for different creativity levels</li>
            <li><b>Generate:</b> Click submit to get your code!</li>
        </ol>
        <p><b>Note:</b> This model was trained on the SPOC dataset containing C++ code examples.</p>
    </div>
    """)

    with gr.Row():
        # Left column - Inputs.
        with gr.Column(scale=1):
            pseudocode_input = gr.Textbox(
                label="šŸ“ Pseudocode",
                placeholder="Enter your pseudocode here... (e.g., 'create string variable s')",
                lines=3,
                value="create string s",
            )

            with gr.Row():
                indent_input = gr.Slider(
                    minimum=1, maximum=10, value=1, step=1,
                    label="šŸ”¢ Indent Level",
                    info="Indentation level for the code",
                )
                line_input = gr.Slider(
                    minimum=1, maximum=100, value=1, step=1,
                    label="šŸ“ Line Number",
                    info="Line number in the program",
                )

            gr.Markdown("### šŸŽ›ļø Generation Parameters")

            with gr.Row():
                temperature_input = gr.Slider(
                    minimum=0.1, maximum=2.0, value=0.7, step=0.1,
                    label="šŸŒ”ļø Temperature",
                    info="Higher = more creative, Lower = more focused",
                )
                top_p_input = gr.Slider(
                    minimum=0.1, maximum=1.0, value=0.9, step=0.05,
                    label="šŸŽÆ Top-p",
                    info="Nucleus sampling parameter",
                )

            max_length_input = gr.Slider(
                minimum=50, maximum=512, value=128, step=10,
                label="šŸ“ Max Length",
                info="Maximum number of tokens to generate",
            )

            generate_btn = gr.Button("šŸš€ Generate Code", variant="primary", size="lg")

        # Right column - Output.
        with gr.Column(scale=1):
            output = gr.Textbox(
                label="šŸ’» Generated Code",
                lines=15,
                placeholder="Generated code will appear here...",
                show_copy_button=True,
            )

    # Examples section.
    gr.Markdown("### šŸ“š Example Inputs")
    examples = gr.Examples(
        examples=create_examples(),
        inputs=[pseudocode_input, indent_input, line_input,
                temperature_input, top_p_input, max_length_input],
        outputs=output,
        fn=generate_code,
        cache_examples=False,
    )

    # Event handlers.
    generate_btn.click(
        fn=generate_code,
        inputs=[pseudocode_input, indent_input, line_input,
                temperature_input, top_p_input, max_length_input],
        outputs=output,
    )

    # Also allow Enter key to generate.
    pseudocode_input.submit(
        fn=generate_code,
        inputs=[pseudocode_input, indent_input, line_input,
                temperature_input, top_p_input, max_length_input],
        outputs=output,
    )

    # Footer.
    gr.HTML("""
    <div style="text-align: center; margin-top: 20px;">
        <p>šŸ¤– <b>Model Details:</b> Fine-tuned GPT-2 with LoRA on SPOC dataset</p>
        <p>šŸ“Š <b>Training:</b> Pseudo-code to C++ code generation with structural information</p>
        <p>⚔ <b>Powered by:</b> Transformers, Safetensors, and Gradio</p>
    </div>
    """)


# Launch configuration.
if __name__ == "__main__":
    demo.launch(
        server_name="0.0.0.0",  # Required for Hugging Face Spaces
        server_port=7860,       # Default port for Spaces
        share=False,            # Don't create public links in Spaces
        show_api=False,         # Disable API docs for cleaner interface
        show_error=True,        # Show errors for debugging
        quiet=False,            # Show startup logs
    )