# Streamlit + CodeLlama Python code-generation demo.
import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Load the model and tokenizer.
model_name = "codellama/CodeLlama-7b-Python-hf"


@st.cache_resource
def _load_model_and_tokenizer(name):
    """Load the causal LM and its tokenizer once per process.

    Streamlit re-executes this script on every widget interaction;
    without st.cache_resource the 7B model would be re-loaded from disk
    on each rerun, which is prohibitively slow.
    """
    return (
        AutoModelForCausalLM.from_pretrained(name),
        AutoTokenizer.from_pretrained(name),
    )


model, tokenizer = _load_model_and_tokenizer(model_name)
# Function to generate Python code using the model
def generate_code(prompt, max_new_tokens=200):
    """Generate Python code for *prompt* with the CodeLlama model.

    Args:
        prompt: Text instruction fed verbatim to the model.
        max_new_tokens: Cap on newly *generated* tokens. (The original
            used max_length=200, which also counted the prompt's tokens,
            so a long prompt could leave no room for output.)

    Returns:
        The decoded model output (prompt + completion) as a string.
    """
    inputs = tokenizer(prompt, return_tensors="pt")
    with torch.no_grad():
        outputs = model.generate(
            inputs["input_ids"],
            # Pass the attention mask explicitly so transformers does not
            # have to infer it (and emits no warning).
            attention_mask=inputs["attention_mask"],
            max_new_tokens=max_new_tokens,
            num_return_sequences=1,
            # temperature/top_p/top_k only take effect when sampling is
            # enabled; without do_sample=True generation is greedy and
            # these arguments are silently ignored.
            do_sample=True,
            temperature=0.7,
            top_p=0.9,
            top_k=50,
            # LLaMA-family models define no pad token; default to EOS to
            # avoid the "Setting pad_token_id to eos_token_id" warning.
            pad_token_id=tokenizer.eos_token_id,
        )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
# --- Page chrome and prompt input -------------------------------------
st.set_page_config(page_title="Code Generator", layout="wide")

# Headline for the main panel.
st.title("CodeLlama Python Code Generator")
st.subheader("Ask me to write clean Python code!")

# Sidebar holds the running chat history; the placeholder is filled in
# later once there is something to show.
with st.sidebar:
    st.header("History")
    history = st.empty()

# Prompt box (ChatGPT-style single input at the bottom of the flow).
user_input = st.text_input("Enter your prompt:", "")
if user_input:
    # Build the instruction and run the model on it.
    request = f"Generate clean Python code for: {user_input}"
    produced = generate_code(request)

    # Render the result as highlighted Python.
    st.code(produced, language="python")

    # Record this exchange in the session-scoped history list,
    # creating the list on first use.
    if "history" not in st.session_state:
        st.session_state.history = []
    st.session_state.history.append({"user": user_input, "response": produced})
# Display history in the sidebar.
# Use .get(): on the very first run (before any prompt is submitted) the
# "history" key does not exist yet, and the original unguarded attribute
# access raised an error.
if st.session_state.get("history"):
    history_text = "".join(
        f"User: {entry['user']}\nResponse: {entry['response']}\n\n"
        for entry in st.session_state.history
    )
    history.text_area("Chat History", value=history_text, height=300)

# Clear history button (for new chat).
if st.button("Start New Chat"):
    st.session_state.history = []
    # st.experimental_rerun was removed in recent Streamlit releases;
    # prefer the stable st.rerun when available.
    if hasattr(st, "rerun"):
        st.rerun()
    else:
        st.experimental_rerun()