import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

# Hugging Face Hub identifier of the fine-tuned code-generation model.
MODEL_NAME = "Johannes/code-generation-model-fine-tuned"


@st.cache_resource
def _load_model_and_tokenizer(model_name: str = MODEL_NAME):
    """Load the tokenizer and model once and reuse them across reruns.

    Streamlit re-executes this script on every widget interaction; without
    ``st.cache_resource`` the (large) model would be re-loaded each time.

    Returns:
        A ``(tokenizer, model)`` pair ready for inference.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)
    model.eval()  # inference only: disable dropout/training-mode behavior
    return tokenizer, model


tokenizer, model = _load_model_and_tokenizer()

st.title("Pseudo-code to Code")
prompt = st.text_area("Enter a code:")

# Cap on *newly generated* tokens. The original used `max_length`, which
# counts the prompt tokens too — a long prompt would silently eat the entire
# generation budget. `max_new_tokens` budgets only the completion.
max_new_tokens = 150

if st.button("Generate Code"):
    if prompt.strip():  # non-empty after trimming whitespace
        inputs = tokenizer(prompt, return_tensors="pt")
        with torch.no_grad():  # no autograd bookkeeping needed for generation
            outputs = model.generate(
                **inputs,
                max_new_tokens=max_new_tokens,
                do_sample=True,
                temperature=0.7,
            )
        generated_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
        st.code(generated_code, language="python")
    else:
        st.warning("Please enter a prompt.")