File size: 1,010 Bytes
432dde7
 
 
 
 
 
445b95f
432dde7
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

# Load your model and tokenizer
@st.cache_resource
def load_model():
    """Load the fine-tuned CodeLlama model and wrap it in a text-generation pipeline.

    Cached with ``st.cache_resource`` (the modern replacement for the
    deprecated ``st.cache(allow_output_mutation=True)``) so the model is
    loaded once per server process instead of on every script rerun.

    Returns:
        transformers.Pipeline: a ``'text-generation'`` pipeline ready for inference.
    """
    import torch  # local import: only needed for device selection

    model_name = "abhishekyo/codellama2-finetuned-codex-fin7"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)
    # Use the first GPU only when CUDA is actually available; fall back to
    # CPU (-1) instead of crashing on machines without a GPU.
    device = 0 if torch.cuda.is_available() else -1
    gen_pipeline = pipeline(
        'text-generation', model=model, tokenizer=tokenizer, device=device
    )
    return gen_pipeline

# Build (or retrieve the cached) text-generation pipeline once per rerun.
code_generator = load_model()

st.title('Text-to-Code Generator')

# Free-form prompt entered by the user.
prompt = st.text_area("Enter your text here:", height=200)

if st.button("Generate Code"):
    if not prompt:
        # Nothing to generate from — ask the user for input.
        st.warning("Please enter some text to generate code.")
    else:
        with st.spinner("Generating code..."):
            outputs = code_generator(prompt, max_length=512, num_return_sequences=1)
            code = outputs[0]['generated_text']
        st.text_area("Generated Code:", value=code, height=200)