"""
Job Description Generator - Streamlit App
"""
import streamlit as st
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
from peft import PeftModel
import time
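# Assumed runtime deps: streamlit, torch, transformers, peft, plus bitsandbytes and accelerate
# for 4-bit loading with device_map="auto" (not pinned here; check the Space's requirements.txt).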
# Page config
st.set_page_config(
page_title="Job Description Generator",
page_icon="πŸ’Ό",
layout="wide"
)
# Title
st.title("πŸ’Ό AI Job Description Generator")
st.markdown("### Powered by LLaMA-3 8B Fine-tuned on 15,885 Job Postings")
# Sidebar
with st.sidebar:
st.header("βš™οΈ Generation Settings")
temperature = st.slider("Temperature", 0.1, 1.5, 0.7, 0.1)
top_k = st.slider("Top-K", 10, 100, 50, 10)
top_p = st.slider("Top-P", 0.5, 1.0, 0.95, 0.05)
max_length = st.slider("Max Length", 200, 800, 400, 50)
st.markdown("---")
st.info("""
**Model**: LLaMA-3 8B
**Method**: QLoRA (4-bit)
**Training**: 15,885 samples
**Performance**: +244% BLEU
""")
if st.button("πŸ—‘οΈ Clear History"):
st.session_state.messages = []
st.rerun()
# Initialize session state
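# messages keeps the generation history for this browser session; the model handle is stored once loaded.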
if "messages" not in st.session_state:
st.session_state.messages = []
if "model" not in st.session_state:
st.session_state.model = None
# Load model
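# @st.cache_resource keeps the loaded model/tokenizer in memory across Streamlit reruns,
# so the weights are downloaded and quantized only once per process.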
@st.cache_resource
def load_model():
with st.spinner("πŸ”„ Loading model... (2-3 minutes first time)"):
try:
model_id = "zewial/job-description-generator-llama3-8b"
base_model = "meta-llama/Meta-Llama-3-8B-Instruct"
            bnb_config = BitsAndBytesConfig(
                load_in_4bit=True,
                bnb_4bit_quant_type="nf4",
                bnb_4bit_compute_dtype=torch.bfloat16,
                bnb_4bit_use_double_quant=True
            )
            tokenizer = AutoTokenizer.from_pretrained(model_id)
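            # Llama-3 ships without a pad token, so padding/generation reuse the EOS token.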
            if tokenizer.pad_token is None:
                tokenizer.pad_token = tokenizer.eos_token
                tokenizer.pad_token_id = tokenizer.eos_token_id
            model = AutoModelForCausalLM.from_pretrained(
                base_model,
                quantization_config=bnb_config,
                device_map="auto",
                trust_remote_code=True
            )
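            # Attach the fine-tuned LoRA adapter weights on top of the quantized base model.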
            model = PeftModel.from_pretrained(model, model_id)
            model.eval()
            return model, tokenizer
        except Exception as e:
            st.error(f"❌ Error loading model: {str(e)}")
            st.info("💡 Make sure the model is uploaded at: https://huggingface.co/zewial/job-description-generator-llama3-8b")
            return None, None
# Load model if not loaded
if st.session_state.model is None:
    model, tokenizer = load_model()
    if model is not None:
        st.session_state.model = model
        st.session_state.tokenizer = tokenizer
        st.success("✅ Model loaded successfully!")
else:
    model = st.session_state.model
    tokenizer = st.session_state.tokenizer
# Main interface
if model is not None:
    # Input form
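    # st.form batches the widgets so the script only reruns when the submit button is pressed.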
    with st.form("job_form", clear_on_submit=True):
        st.subheader("📝 Job Details")
        col1, col2 = st.columns(2)
        with col1:
            job_title = st.text_input(
                "Job Title *",
                placeholder="e.g., Senior Data Scientist",
                help="The position you're hiring for"
            )
            work_type = st.selectbox(
                "Work Type *",
                ["Full-time", "Part-time", "Contract", "Internship", "Remote"],
                help="Employment type"
            )
        with col2:
            experience = st.selectbox(
                "Experience Level *",
                ["Entry level", "Mid-Senior level", "Senior level", "Executive level"],
                index=1,
                help="Required experience level"
            )
            location = st.text_input(
                "Location *",
                placeholder="e.g., San Francisco, CA or Remote",
                help="Job location"
            )
        additional = st.text_area(
            "Additional Instructions (Optional)",
            placeholder="Any specific requirements, benefits, or details to include...",
            help="Optional: Add specific details you want in the description"
        )
        submitted = st.form_submit_button("🚀 Generate Job Description", use_container_width=True)
    # Generate when submitted
    if submitted:
        if not job_title or not location:
            st.error("❌ Please fill in Job Title and Location")
        else:
            # Create prompt
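            # The prompt follows the Llama-3 Instruct chat template: each turn is wrapped in
            # <|start_header_id|>role<|end_header_id|> ... <|eot_id|>, and an opened assistant
            # header at the end cues the model to generate the job description.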
prompt = f"""<|begin_of_text|><|start_header_id|>system<|end_header_id|>
You are an expert HR professional who writes clear, professional, and comprehensive job descriptions.<|eot_id|><|start_header_id|>user<|end_header_id|>
Generate a professional job description for:
Job Title: {job_title}
Experience Level: {experience}
Work Type: {work_type}
Location: {location}"""
if additional:
prompt += f"\n\nAdditional Requirements:\n{additional}"
prompt += "\n\nCreate a comprehensive job description with company/role overview, responsibilities, qualifications, and benefits.<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n"
            # Store user input
            user_msg = f"**Title:** {job_title} | **Level:** {experience} | **Type:** {work_type} | **Location:** {location}"
            st.session_state.messages.append({"role": "user", "content": user_msg})
            # Generate
            with st.spinner("✨ Generating job description..."):
                try:
                    inputs = tokenizer(
                        prompt,
                        return_tensors="pt",
                        max_length=512,
                        truncation=True
                    ).to(model.device)
                    start_time = time.time()
                    with torch.no_grad():
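                        # Sampling uses the sidebar settings; repetition_penalty > 1 discourages
                        # the model from looping on repeated phrases.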
                        outputs = model.generate(
                            **inputs,
                            max_new_tokens=max_length,
                            temperature=temperature,
                            top_k=top_k,
                            top_p=top_p,
                            repetition_penalty=1.1,
                            do_sample=True,
                            pad_token_id=tokenizer.pad_token_id
                        )
                    gen_time = time.time() - start_time
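                    # Decode only the newly generated tokens, skipping the prompt portion of the output.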
                    generated = tokenizer.decode(
                        outputs[0][inputs['input_ids'].shape[1]:],
                        skip_special_tokens=True
                    )
                    # Store assistant response
                    st.session_state.messages.append({
                        "role": "assistant",
                        "content": generated,
                        "time": gen_time,
                        "tokens": len(outputs[0]) - inputs['input_ids'].shape[1],
                        "title": job_title
                    })
                except Exception as e:
                    st.error(f"❌ Generation error: {str(e)}")
    # Display chat history
    if len(st.session_state.messages) > 0:
        st.markdown("---")
        st.subheader("💬 Generation History")
        for idx, msg in enumerate(st.session_state.messages):
            if msg["role"] == "user":
                with st.container():
                    st.markdown("**👤 Your Input:**")
                    st.info(msg["content"])
            else:
                with st.container():
                    st.markdown("**🤖 Generated Description:**")
                    st.success(msg["content"])
                    # Stats
                    col1, col2, col3 = st.columns(3)
                    with col1:
                        st.metric("⏱️ Time", f"{msg.get('time', 0):.1f}s")
                    with col2:
                        st.metric("📝 Words", len(msg["content"].split()))
                    with col3:
                        st.metric("🔀 Tokens", msg.get('tokens', 0))
                    # Download button
                    st.download_button(
                        label="📥 Download as Text",
                        data=msg["content"],
                        file_name=f"job_description_{msg.get('title', 'output').replace(' ', '_')}.txt",
                        mime="text/plain",
                        key=f"download_{idx}"
                    )
            st.markdown("---")
    if len(st.session_state.messages) == 0:
        st.info("👆 Fill in the form above and click 'Generate Job Description' to start!")
else:
st.error("❌ Model failed to load. Please check the logs or contact support.")
# Footer
st.markdown("---")
st.markdown("""
<div style='text-align: center; color: #666; padding: 1rem;'>
<p>Built with ❀️ using LLaMA-3 8B | Fine-tuned on 15,885 job postings</p>
<p>⚑ Powered by QLoRA | πŸš€ Deployed on Hugging Face Spaces</p>
</div>
""", unsafe_allow_html=True)