"""Streamlit app: turn a natural-language description into code and optionally
run generated Python in a sandboxed subprocess.

NOTE(review): executing model-generated code is inherently unsafe — the
subprocess timeout limits runtime but not filesystem/network access. Confirm
this app only runs in a trusted/sandboxed environment.
"""

import os
import subprocess
import sys

import streamlit as st
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Keep model weights in a local cache directory instead of the default HF home.
os.environ["HF_HOME"] = "./cache"

MODEL_NAME = "Salesforce/codegen-350M-mono"


@st.cache_resource
def load_model():
    """Load tokenizer and model once per server process.

    Streamlit re-executes this script on every widget interaction;
    ``st.cache_resource`` prevents reloading the 350M-parameter model each run.

    Returns:
        tuple[AutoTokenizer, AutoModelForCausalLM]: the loaded tokenizer/model.
    """
    tok = AutoTokenizer.from_pretrained(MODEL_NAME)
    # CodeGen is a decoder-only (causal) model, so AutoModelForCausalLM is
    # required — AutoModelForSeq2SeqLM fails to load this checkpoint.
    mdl = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
    # The CodeGen tokenizer ships without a pad token; padding=True would
    # raise a ValueError unless one is assigned.
    if tok.pad_token is None:
        tok.pad_token = tok.eos_token
    return tok, mdl


tokenizer, model = load_model()


def generate_code(description, language):
    """Generate source code for *description* in *language*.

    Args:
        description (str): natural-language task description.
        language (str): target programming language name (e.g. "Python").

    Returns:
        str: the model's generated code, prompt removed, whitespace-stripped.
    """
    prompt = f"Generate {language} code: {description}"
    inputs = tokenizer(prompt, return_tensors="pt", padding=True, truncation=True)
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            # max_new_tokens bounds only the generated continuation; the old
            # max_length=400 also counted the prompt tokens.
            max_new_tokens=400,
            pad_token_id=tokenizer.pad_token_id,
        )
    # Causal models echo the prompt in their output — decode only the tokens
    # generated after the prompt.
    prompt_len = inputs["input_ids"].shape[1]
    response = tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True)
    return response.strip()


def execute_code(code, language):
    """Run *code* with a 5-second timeout if *language* is Python.

    Args:
        code (str): source code to execute.
        language (str): language name; only "Python" is executable.

    Returns:
        str: stdout on success, stderr on failure, or an error/unsupported
        message.
    """
    if language != "Python":
        return "Code execution only supported for Python."
    try:
        # sys.executable is portable across platforms and virtualenvs,
        # unlike a hard-coded 'python3'. List argv keeps shell=False.
        result = subprocess.run(
            [sys.executable, "-c", code],
            capture_output=True,
            text=True,
            timeout=5,
        )
        return result.stdout if result.stdout else result.stderr
    except Exception as e:  # surface timeout / OS errors to the UI
        return str(e)


# ---------------------------------------------------------------------------
# Streamlit UI
# ---------------------------------------------------------------------------
st.title("Multi-Language Text-to-Code AI")
st.write(
    "Convert natural language descriptions into code in different programming "
    "languages! Run Python code directly in the app."
)

description = st.text_area("Describe your coding task...")
language = st.selectbox("Select Programming Language", ["Python", "JavaScript", "Java"])

if st.button("Generate Code"):
    if description:
        code = generate_code(description, language)
        st.code(code, language=language.lower())
        if language == "Python":
            output = execute_code(code, language)
            st.text_area("Execution Output", output, height=150)
    else:
        st.warning("Please enter a description to generate code.")