File size: 2,833 Bytes
218e523
 
 
02574a5
 
218e523
 
 
 
 
02574a5
218e523
 
 
 
02574a5
 
218e523
02574a5
218e523
 
02574a5
 
 
218e523
02574a5
218e523
 
 
 
 
 
02574a5
 
 
218e523
 
 
02574a5
218e523
02574a5
 
218e523
02574a5
 
218e523
 
 
02574a5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
218e523
02574a5
218e523
 
02574a5
 
 
 
218e523
 
02574a5
218e523
 
 
 
 
 
 
 
 
 
 
 
 
 
 
02574a5
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
import gradio as gr
from transformers import pipeline, AutoTokenizer

# Load a smaller model that fits within free tier memory limits
# (350M-parameter CodeGen variant trained on Python only).
MODEL_NAME = "Salesforce/codegen-350M-mono"
# Tokenizer is created explicitly so its eos_token_id can be reused as
# pad_token_id during generation (see generate_code below).
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
# Module-level pipeline: the model is downloaded/loaded once at import time
# and shared by every request handled by the Gradio app.
generator = pipeline(
    "text-generation",
    model=MODEL_NAME,
    tokenizer=tokenizer,
    device="cpu",  # Force CPU usage for free tier
)

def generate_code(natural_language_input):
    """Generate Python code from a natural-language description.

    Args:
        natural_language_input: Free-text description of the desired code,
            taken from the Gradio textbox.

    Returns:
        The generated code as a string, or a human-readable error message
        (this function never raises — errors are rendered in the UI).
    """
    try:
        # Create a clear prompt for code generation; the trailing "#" nudges
        # the model to continue the comment and then emit code.
        prompt = f"# Python code that {natural_language_input}\n#"

        # Generate with conservative parameters to save memory
        generated = generator(
            prompt,
            max_new_tokens=128,  # Reduced from 256 to save memory
            temperature=0.7,
            top_p=0.9,
            pad_token_id=tokenizer.eos_token_id,
            do_sample=True,
        )

        # The pipeline echoes the prompt at the start of generated_text.
        # Strip only the *leading* prompt: str.replace(prompt, "") would
        # delete every occurrence of the prompt substring anywhere in the
        # output, silently corrupting otherwise-valid code.
        full_response = generated[0]['generated_text']
        if full_response.startswith(prompt):
            code_response = full_response[len(prompt):].strip()
        else:
            code_response = full_response.strip()

        # Drop anything after a markdown code fence the model may emit.
        if "```" in code_response:
            code_response = code_response.split("```")[0]

        return code_response
    except Exception as e:
        # Broad catch is deliberate: this is the UI boundary, so any
        # failure (OOM, tokenizer error, etc.) is shown to the user
        # instead of crashing the app.
        return f"Error generating code: {str(e)}\n\n(Tip: Try a simpler description or break your request into smaller parts)"

# Streamlined Gradio interface to reduce memory footprint.
# Layout: one input textbox, Generate/Clear buttons, and a syntax-highlighted
# code output, wired together below via .click() callbacks.
with gr.Blocks(title="Text to Python Code Generator") as demo:
    gr.Markdown("""
    # 🐍 Python Code Generator
    *Free tier version using Salesforce/codegen-350M-mono*
    """)
    
    with gr.Row():
        input_text = gr.Textbox(
            label="Describe what you want the code to do",
            placeholder="e.g., 'calculate fibonacci sequence up to N numbers'",
            lines=3
        )
    
    with gr.Row():
        generate_btn = gr.Button("Generate", variant="primary")
        clear_btn = gr.Button("Clear")
    
    # interactive=True lets users edit the generated snippet in place.
    output_code = gr.Code(
        label="Generated Python Code",
        language="python",
        interactive=True
    )
    
    # Memory-friendly examples (short prompts keep generation cheap on CPU)
    examples = gr.Examples(
        examples=[
            ["print hello world"],
            ["function to calculate square of a number"],
            ["read a CSV file using pandas"],
            ["simple Flask app with one route"]
        ],
        inputs=input_text,
        label="Try these examples (keep requests simple)"
    )
    
    # Button actions
    generate_btn.click(
        fn=generate_code,
        inputs=input_text,
        outputs=output_code
    )
    
    # Clear resets both components: the tuple maps positionally onto
    # [input_text, output_code].
    clear_btn.click(
        fn=lambda: ("", ""),
        inputs=None,
        outputs=[input_text, output_code]
    )

# Launch with minimal resources; show_error surfaces server-side exceptions
# in the browser instead of failing silently.
demo.launch(debug=False, show_error=True)