# Pyco / app.py
# Origin: Hugging Face Space by Wazahat — commit "Create app.py" (8d48c50, verified)
import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# Hugging Face checkpoint used for code generation.
model_name = "codellama/CodeLlama-7b-Python-hf"


@st.cache_resource
def _load_model_and_tokenizer(name):
    """Load the causal LM and its tokenizer exactly once per server process.

    Streamlit re-executes this whole script on every user interaction;
    without st.cache_resource the multi-GB model would be re-loaded on
    each rerun, which is both slow and memory-hungry.
    """
    model = AutoModelForCausalLM.from_pretrained(name)
    tokenizer = AutoTokenizer.from_pretrained(name)
    return model, tokenizer


model, tokenizer = _load_model_and_tokenizer(model_name)
# Function to generate Python code using the model
def generate_code(prompt):
    """Generate Python code for *prompt* with the loaded CodeLlama model.

    Args:
        prompt: Natural-language instruction (already formatted by the caller).

    Returns:
        The decoded completion as a string. Note: like the original
        implementation, the decoded text includes the prompt itself.
    """
    inputs = tokenizer(prompt, return_tensors="pt")
    with torch.no_grad():
        outputs = model.generate(
            inputs["input_ids"],
            # Without the attention mask, padded batches can attend to pad
            # tokens and transformers emits a warning.
            attention_mask=inputs.get("attention_mask"),
            # max_length counts the prompt tokens too, so a long prompt
            # left almost no room for the completion; max_new_tokens
            # always allows 200 generated tokens.
            max_new_tokens=200,
            num_return_sequences=1,
            # temperature / top_p / top_k are silently ignored by
            # model.generate unless sampling is enabled.
            do_sample=True,
            temperature=0.7,
            top_p=0.9,
            top_k=50,
            # CodeLlama has no pad token; reuse EOS to silence the
            # "Setting pad_token_id to eos_token_id" warning.
            pad_token_id=tokenizer.eos_token_id,
        )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
# --- Streamlit app layout ---------------------------------------------------
st.set_page_config(page_title="Code Generator", layout="wide")

# Initialise chat history BEFORE anything reads it. The original only
# created st.session_state.history inside the `if user_input:` branch, so
# the very first render (no input yet) raised AttributeError when the
# history display below accessed it.
if "history" not in st.session_state:
    st.session_state.history = []

# Sidebar: a placeholder that the history rendering below fills in.
with st.sidebar:
    st.header("History")
    history = st.empty()

# Main area for the code generation interaction.
st.title("CodeLlama Python Code Generator")
st.subheader("Ask me to write clean Python code!")

# Input section (bottom input box like ChatGPT).
user_input = st.text_input("Enter your prompt:", "")

if user_input:
    # Generate Python code and show it.
    prompt = f"Generate clean Python code for: {user_input}"
    generated_code = generate_code(prompt)
    st.code(generated_code, language="python")
    # Record the exchange for the sidebar history.
    st.session_state.history.append({
        "user": user_input,
        "response": generated_code,
    })

# Render the accumulated history into the sidebar placeholder.
if st.session_state.history:
    history_text = ""
    for entry in st.session_state.history:
        history_text += f"User: {entry['user']}\nResponse: {entry['response']}\n\n"
    history.text_area("Chat History", value=history_text, height=300)

# Clear history button (for a new chat).
if st.button("Start New Chat"):
    st.session_state.history = []
    # st.experimental_rerun was deprecated and later removed; prefer
    # st.rerun when available, fall back on older Streamlit versions.
    (getattr(st, "rerun", None) or st.experimental_rerun)()