Spaces:
Sleeping
Sleeping
File size: 1,669 Bytes
b1fca7a 1eea6c5 b1fca7a 1eea6c5 b1fca7a 1eea6c5 b1fca7a ac26bd5 b1fca7a 1eea6c5 b1fca7a 750cdab b1fca7a 750cdab b1fca7a 1eea6c5 b1fca7a 1eea6c5 750cdab b1fca7a ac26bd5 750cdab b1fca7a 1eea6c5 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 |
import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
import torch
# Load the model and tokenizer
@st.cache_resource()
def load_model():
    """Build and return a Hugging Face text-generation pipeline for CodeGen.

    Decorated with st.cache_resource so the model download/initialization
    happens once per server process instead of on every Streamlit rerun.

    Returns:
        transformers.Pipeline: a "text-generation" pipeline wrapping the
        CodeGen-350M-mono model and its tokenizer.
    """
    MODEL_NAME = "Salesforce/codegen-350M-mono"  # Use a known model
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
    # float32 keeps the model CPU-friendly (no half-precision requirement).
    model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, torch_dtype=torch.float32)
    # Load model into the pipeline for generation
    return pipeline("text-generation", model=model, tokenizer=tokenizer)
# Initialize the model (cached — cheap on reruns after the first call).
code_generator = load_model()

# Streamlit UI
st.title("CodeGen Code Bot 🚀")
st.subheader("Generate code snippets using Hugging Face CodeGen")

# User input
prompt = st.text_area("Enter a coding prompt (e.g., 'Write a Python function to sort a list'): ")

# Generate Code
if st.button("Generate Code"):
    # Ignore whitespace-only prompts; warn instead of calling the model.
    if prompt.strip():
        st.info("Generating code... Please wait ⏳")
        try:
            # Generate code using the model
            response = code_generator(
                prompt,
                max_length=512,        # Adjust for longer code if needed
                temperature=0.2,       # Lower temperature for more deterministic results
                do_sample=True,        # Enable sampling
                num_return_sequences=1,
            )
            # Pipeline returns a list of dicts; the first entry holds the text
            # (which includes the original prompt as a prefix).
            generated_code = response[0]['generated_text']
            # Display the generated code output
            st.code(generated_code, language="python")  # Change language as needed
        except Exception as e:
            # Surface model/runtime failures in the UI rather than crashing the app.
            st.error(f"Error: {str(e)}")
    else:
        st.warning("Please enter a prompt.")

st.caption("Powered by CodeGen | Streamlit UI")
|