import streamlit as st
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
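# Dependencies: streamlit, torch, transformers (declare them in requirements.txt on Spaces)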
# -----------------------------
# Page Configuration
# -----------------------------
st.set_page_config(
    page_title="AI Text Generator",
    page_icon="🤖",
    layout="wide"
)
# -----------------------------
# Device Setup (HF Spaces safe)
# -----------------------------
device = "cuda" if torch.cuda.is_available() else "cpu"
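# Free CPU Spaces have no CUDA, so this falls back to "cpu" automatically,
# and picks up the GPU whenever the Space has one attached.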
# -----------------------------
# Sidebar
# -----------------------------
st.sidebar.title("⚙️ Settings")
model_path = st.sidebar.text_input(
    "Model Name / Path",
    value="gpt2"
)
max_new_tokens = st.sidebar.slider("Max New Tokens", 20, 300, 100)
temperature = st.sidebar.slider("Temperature", 0.5, 1.5, 0.8)
top_k = st.sidebar.slider("Top-K", 10, 100, 50)
top_p = st.sidebar.slider("Top-P", 0.5, 1.0, 0.95)
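# Sampling knobs: lower temperature makes output more deterministic;
# top-k and top-p limit how many candidate tokens are sampled from.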
st.sidebar.write(f"Device: **{device.upper()}**")
# -----------------------------
# Title
# -----------------------------
st.title("🤖 Professional AI Text Generator")
st.markdown("Generate text using Hugging Face models.")
# -----------------------------
# Load Model (cached)
# -----------------------------
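# st.cache_resource keeps the returned objects alive across reruns,
# so the model loads once per model name instead of on every interaction.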
@st.cache_resource
def load_model(model_name):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    # GPT-2-style tokenizers ship without a pad token; reuse EOS so padding works
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        torch_dtype=torch.float32  # safer for CPU Spaces
    )
    model.to(device)
    model.eval()
    return tokenizer, model
# Load model safely
try:
    tokenizer, model = load_model(model_path)
except Exception as e:
    st.error(f"Model loading failed: {e}")
    st.stop()
# -----------------------------
# Input Area
# -----------------------------
prompt = st.text_area(
    "Enter your prompt:",
    height=200,
    placeholder="Example: Once upon a time..."
)
# -----------------------------
# Generate Button
# -----------------------------
if st.button("✨ Generate Text", use_container_width=True):
    if not prompt.strip():
        st.warning("Please enter a prompt.")
    else:
        with st.spinner("Generating..."):
            # Tokenize the prompt and move tensors to the same device as the model
            inputs = tokenizer(prompt, return_tensors="pt").to(device)
            with torch.no_grad():  # inference only; no gradients needed
                output = model.generate(
                    **inputs,
                    max_new_tokens=max_new_tokens,
                    temperature=temperature,
                    top_k=top_k,
                    top_p=top_p,
                    do_sample=True,
                    pad_token_id=tokenizer.eos_token_id
                )
            generated_text = tokenizer.decode(
                output[0],
                skip_special_tokens=True
            )
        st.subheader("Generated Output")
        st.write(generated_text)
        st.download_button(
            label="📥 Download",
            data=generated_text,
            file_name="generated_text.txt",
            mime="text/plain"
        )
# -----------------------------
# Footer
# -----------------------------
st.markdown("---")
st.markdown("Built with ❤️ using Streamlit + Transformers")