import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
import os

# Model configuration
# Loading from the current directory, since the model files are uploaded to the Space root
MODEL_NAME = "."  # Current directory contains model.safetensors and the tokenizer files
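
# Hypothetical override (not in the original app): if the weights lived in a separate
# HF repository instead of the Space root, MODEL_NAME could be pointed at it via an
# environment variable, e.g.:
# MODEL_NAME = os.environ.get("MODEL_NAME", MODEL_NAME)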
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

# Global variables for model caching
_model = None
_tokenizer = None


def load_model():
    """Load the model and tokenizer with simple caching."""
    global _model, _tokenizer

    # Return the cached model if it is already loaded
    if _model is not None and _tokenizer is not None:
        return _model, _tokenizer

    print(f"Loading model from: {MODEL_NAME}")
    print(f"Using device: {DEVICE}")

    try:
        # Load the tokenizer
        tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)

        # GPT-2 has no pad token by default, so fall back to the EOS token
        if tokenizer.pad_token is None:
            tokenizer.pad_token = tokenizer.eos_token

        # Load the model with device-appropriate settings
        model = AutoModelForCausalLM.from_pretrained(
            MODEL_NAME,
            torch_dtype=torch.float16 if DEVICE == "cuda" else torch.float32,
            device_map="auto" if DEVICE == "cuda" else None,
            trust_remote_code=True
        )
        if DEVICE == "cpu":
            model = model.to(DEVICE)

        print("✅ Model and tokenizer loaded successfully!")

        # Cache the loaded model and tokenizer
        _model = model
        _tokenizer = tokenizer
        return model, tokenizer

    except Exception as e:
        print(f"❌ Error loading model: {e}")
        print("\n🔧 Troubleshooting:")
        print("1. If using an HF repository: make sure MODEL_NAME is 'username/model-name'")
        print("2. If using local files: make sure the model files are in the correct folder")
        print("3. For private repos: add an authentication token")
        raise


# Initialize the model and tokenizer at import time
try:
    model, tokenizer = load_model()
except Exception as e:
    print(f"Failed to load model: {e}")
    # Fall back to None so the UI can still start and report the error
    model, tokenizer = None, None
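
# Note: loading at import time makes Space startup slower but keeps the first
# generation request fast; lazy loading inside generate_code would invert that trade-off.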
def generate_code(pseudocode, indent=1, line=1, temperature=0.7, top_p=0.9, max_length=128):
    """
    Generate code from pseudo-code with line and indent information.

    Args:
        pseudocode: Input pseudo-code string
        indent: Indentation level (1-10)
        line: Line number (1-100)
        temperature: Sampling temperature (0.1-2.0)
        top_p: Nucleus sampling parameter (0.1-1.0)
        max_length: Maximum number of new tokens to generate (50-512)

    Returns:
        Generated code string
    """
    try:
        # Check that the model is loaded
        if model is None or tokenizer is None:
            return f"""❌ Model not loaded. Please check:
1. MODEL_NAME in app.py - should be either:
   - Your HF repository: "username/model-name"
   - Local path: "./model" (if files uploaded to the Space)
2. If using an HF repository, make sure it exists and is public
3. If using local files, ensure the model files are in the correct folder
Current MODEL_NAME: {MODEL_NAME}"""

        # Validate inputs
        if not pseudocode.strip():
            return "❌ Error: Please enter some pseudocode."

        # Format the input with line and indent information (matches the training format)
        prompt = f"Pseudocode: {pseudocode.strip()} | Indent: {indent} | Line: {line}\nCode:"
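
        # With the defaults above, the rendered prompt looks like:
        #   "Pseudocode: create string s | Indent: 1 | Line: 1\nCode:"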
        # Tokenize the input
        inputs = tokenizer(prompt, return_tensors='pt', padding=True, truncation=True, max_length=256)
        inputs = {k: v.to(DEVICE) for k, v in inputs.items()}

        # Generate with the model
        model.eval()
        with torch.no_grad():
            outputs = model.generate(
                **inputs,
                max_new_tokens=max_length,
                temperature=max(0.1, temperature),  # Enforce a minimum temperature
                top_p=max(0.1, top_p),              # Enforce a minimum top_p
                do_sample=True,
                pad_token_id=tokenizer.eos_token_id,
                eos_token_id=tokenizer.eos_token_id,
                num_return_sequences=1,
                repetition_penalty=1.1,
                no_repeat_ngram_size=2
            )

        # Decode the output
        generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)

        # Extract only the code part (drop the prompt)
        if "Code:" in generated_text:
            code = generated_text.split("Code:")[-1].strip()
        else:
            code = generated_text.strip()

        # Clean up the output
        if code.startswith(prompt):
            code = code[len(prompt):].strip()

        return code if code else "❌ No code generated. Try adjusting the parameters."

    except Exception as e:
        return f"❌ Error generating code: {str(e)}"
def create_examples():
    """Create example inputs for the interface"""
    return [
        ["create string s", 1, 1, 0.7, 0.9, 100],
        ["read input from user", 1, 2, 0.7, 0.9, 100],
        ["if s is empty", 1, 3, 0.7, 0.9, 100],
        ["print hello world", 2, 4, 0.7, 0.9, 100],
        ["for i from 0 to n", 1, 5, 0.7, 0.9, 100],
        ["declare integer array", 1, 1, 0.5, 0.9, 80],
        ["while condition is true", 2, 10, 0.8, 0.95, 120]
    ]

# Create the Gradio interface
with gr.Blocks(
    theme=gr.themes.Soft(),
    title="🐍 Pseudo-Code to Code Generator",
    css="""
    .gradio-container {
        max-width: 1200px;
        margin: auto;
    }
    .header {
        text-align: center;
        margin-bottom: 30px;
    }
    .info-box {
        background-color: #f0f8ff;
        padding: 15px;
        border-radius: 10px;
        margin: 10px 0;
    }
    """
) as demo:
    # Header
    gr.HTML("""
    <div class="header">
        <h1>🐍 Pseudo-Code to Code Generator</h1>
        <p>Convert natural-language pseudo-code to executable code using a fine-tuned GPT-2</p>
    </div>
    """)

    # Info box
    gr.HTML("""
    <div class="info-box">
        <h3>📋 How to use:</h3>
        <ol>
            <li><strong>Enter pseudocode:</strong> Describe what you want the code to do in natural language</li>
            <li><strong>Set context:</strong> Adjust the indent level and line number for better structure</li>
            <li><strong>Tune generation:</strong> Adjust temperature and top_p for different creativity levels</li>
            <li><strong>Generate:</strong> Click submit to get your code!</li>
        </ol>
        <p><strong>Note:</strong> This model was trained on the SPoC dataset of C++ code examples.</p>
    </div>
    """)
    with gr.Row():
        # Left column - inputs
        with gr.Column(scale=1):
            pseudocode_input = gr.Textbox(
                label="📝 Pseudocode",
                placeholder="Enter your pseudocode here... (e.g., 'create string variable s')",
                lines=3,
                value="create string s"
            )

            with gr.Row():
                indent_input = gr.Slider(
                    minimum=1, maximum=10, value=1, step=1,
                    label="🔢 Indent Level",
                    info="Indentation level for the code"
                )
                line_input = gr.Slider(
                    minimum=1, maximum=100, value=1, step=1,
                    label="📍 Line Number",
                    info="Line number in the program"
                )

            gr.Markdown("### 🎛️ Generation Parameters")

            with gr.Row():
                temperature_input = gr.Slider(
                    minimum=0.1, maximum=2.0, value=0.7, step=0.1,
                    label="🌡️ Temperature",
                    info="Higher = more creative, lower = more focused"
                )
                top_p_input = gr.Slider(
                    minimum=0.1, maximum=1.0, value=0.9, step=0.05,
                    label="🎯 Top-p",
                    info="Nucleus sampling parameter"
                )

            max_length_input = gr.Slider(
                minimum=50, maximum=512, value=128, step=10,
                label="📏 Max Length",
                info="Maximum number of tokens to generate"
            )

            generate_btn = gr.Button("🚀 Generate Code", variant="primary", size="lg")

        # Right column - output
        with gr.Column(scale=1):
            output = gr.Textbox(
                label="💻 Generated Code",
                lines=15,
                placeholder="Generated code will appear here...",
                show_copy_button=True
            )
    # Examples section
    gr.Markdown("### 📚 Example Inputs")
    examples = gr.Examples(
        examples=create_examples(),
        inputs=[pseudocode_input, indent_input, line_input, temperature_input, top_p_input, max_length_input],
        outputs=output,
        fn=generate_code,
        cache_examples=False
    )

    # Event handlers
    generate_btn.click(
        fn=generate_code,
        inputs=[pseudocode_input, indent_input, line_input, temperature_input, top_p_input, max_length_input],
        outputs=output
    )

    # Also allow the Enter key to trigger generation
    pseudocode_input.submit(
        fn=generate_code,
        inputs=[pseudocode_input, indent_input, line_input, temperature_input, top_p_input, max_length_input],
        outputs=output
    )
    # Footer
    gr.HTML("""
    <div style="text-align: center; margin-top: 30px; padding: 20px; border-top: 1px solid #eee;">
        <p>🤖 <strong>Model Details:</strong> Fine-tuned GPT-2 with LoRA on the SPoC dataset</p>
        <p>📊 <strong>Training:</strong> Pseudo-code to C++ code generation with structural information</p>
        <p>⚡ <strong>Powered by:</strong> Transformers, Safetensors, and Gradio</p>
    </div>
    """)
# Launch configuration
if __name__ == "__main__":
    demo.launch(
        server_name="0.0.0.0",  # Required for Hugging Face Spaces
        server_port=7860,       # Default port for Spaces
        share=False,            # Don't create public share links in Spaces
        show_api=False,         # Hide API docs for a cleaner interface
        show_error=True,        # Surface errors for debugging
        quiet=False             # Show startup logs
    )