|
|
|
|
|
|
|
|
|
|
|
|
|
|
import streamlit as st |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
from transformers import AutoTokenizer, AutoModelForCausalLM |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
from wordcloud import WordCloud |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import matplotlib.pyplot as plt |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import torch |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@st.cache_resource
def load_model_and_tokenizer():
    """Download (or read from cache) the Gemma-2B tokenizer and model.

    Decorated with ``st.cache_resource`` so the heavy Hugging Face objects
    are created once per server process and shared across reruns/sessions.

    Returns:
        tuple: ``(tokenizer, model)`` loaded from the ``google/gemma-2b``
        checkpoint on the Hugging Face Hub.
    """
    checkpoint = "google/gemma-2b"
    return (
        AutoTokenizer.from_pretrained(checkpoint),
        AutoModelForCausalLM.from_pretrained(checkpoint),
    )
|
|
|
|
|
|
|
|
|
|
|
def generate_text(prompt, tone, max_length, temperature=0.7, top_p=0.9, repetition_penalty=1.0):
    """Generate a tone-conditioned completion for *prompt* with Gemma-2B.

    Args:
        prompt (str): User-supplied text to complete.
        tone (str): "Funny", "Serious", or "Poetic"; any other value uses
            the raw prompt unchanged.
        max_length (int): Upper bound on newly generated tokens. The UI
            labels this "Word count" — tokens are not words, so treat it
            as an approximate length cap.
        temperature (float): Sampling temperature forwarded to ``generate``.
        top_p (float): Nucleus-sampling threshold forwarded to ``generate``.
        repetition_penalty (float): Penalty forwarded to ``generate``.

    Returns:
        str: Decoded output text (includes the tone-wrapped prompt, since
        causal LMs echo their input ids).
    """
    tokenizer, model = load_model_and_tokenizer()

    # Prepend a tone instruction; unknown tones fall back to the bare prompt.
    tone_prompts = {
        "Funny": f"Generate a funny response to: {prompt}",
        "Serious": f"Provide a serious and thoughtful response to: {prompt}",
        "Poetic": f"Write a poetic response to: {prompt}"
    }
    input_text = tone_prompts.get(tone, prompt)

    inputs = tokenizer(input_text, return_tensors="pt")
    # Inference only — no_grad skips autograd bookkeeping and saves memory.
    with torch.no_grad():
        outputs = model.generate(
            inputs["input_ids"],
            # Pass the attention mask explicitly so padding is handled
            # correctly and transformers does not warn about inferring it.
            attention_mask=inputs["attention_mask"],
            # BUG FIX: the old code used
            #   max_length=max_length + len(input_text.split())
            # which conflates whitespace-split *words* with tokenizer
            # *tokens* and caps the TOTAL sequence length — prompts that
            # tokenize into more tokens than words silently shrank (or
            # zeroed) the generation budget. max_new_tokens bounds only
            # the generated continuation, which is what the slider means.
            max_new_tokens=max_length,
            temperature=temperature,
            top_p=top_p,
            repetition_penalty=repetition_penalty,
            num_return_sequences=1,
            do_sample=True
        )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
|
|
|
|
|
|
|
|
# Inject the app-wide CSS theme: darkened background image, gradient title
# with hover scaling, card-style instruction box, neon-glow output box,
# button hover effects, and slider recoloring. unsafe_allow_html is required
# for Streamlit to render a raw <style> tag.
st.markdown("""
<style>
/* Background image with fallback */
.stApp {
    background: linear-gradient(rgba(0,0,0,0.6), rgba(0,0,0,0.6)), url('images/background.png');
    background-size: cover;
    background-position: center;
    color: #ffffff; /* White text for contrast */
}
/* Cool gradient title with hover animation */
.title {
    background: linear-gradient(90deg, #00d2ff, #3a7bd5);
    -webkit-background-clip: text;
    color: transparent;
    font-size: 40px;
    font-weight: bold;
    transition: transform 0.3s;
}
.title:hover {
    transform: scale(1.05);
}
/* Card-like instructions */
.instructions {
    background: rgba(255, 255, 255, 0.1);
    padding: 15px;
    border-radius: 10px;
    box-shadow: 0 5px 20px rgba(0,0,0,0.3);
    font-size: 18px;
    color: #e0e0e0;
}
/* Neon glow output box */
.output-box {
    background: rgba(30, 30, 50, 0.9);
    padding: 15px;
    border-radius: 12px;
    box-shadow: 0 0 15px #00d2ff, 0 0 30px #3a7bd5;
    font-family: 'Courier New', monospace;
    font-size: 16px;
    color: #ffffff;
    white-space: pre-wrap;
    animation: glow 1.5s infinite alternate;
}
@keyframes glow {
    from { box-shadow: 0 0 10px #00d2ff; }
    to { box-shadow: 0 0 20px #3a7bd5; }
}
/* Button hover effect */
.stButton>button {
    background: #3a7bd5;
    color: white;
    border-radius: 8px;
    transition: all 0.3s;
}
.stButton>button:hover {
    background: #00d2ff;
    transform: translateY(-2px);
}
/* Slider styling */
.stSlider>div>div>div {
    background: #00d2ff !important;
}
</style>
""", unsafe_allow_html=True)
|
|
|
|
|
|
|
|
# Page header: gradient-styled title on the left, GSoC badge on the right.
header_left, header_right = st.columns([3, 1])
with header_left:
    st.markdown('<p class="title">Gemma Text Generator</p>', unsafe_allow_html=True)
with header_right:
    st.image("images/gsoc_logo.png", width=80, caption="GSoC 2025")
|
|
|
|
|
|
|
|
|
|
|
# Intro card: explains what the app does and shows a worked example.
_INSTRUCTIONS_HTML = """
<p class="instructions">
Enter a prompt below to generate text using the Gemma model from DeepMind. Customize the tone and length to see different outputs!<br>
<b>Example:</b> Prompt: "The cat sat on" | Tone: "Funny" | Length: 50 → "The cat sat on my homework and laughed as I cried over my grades."
</p>
"""
st.markdown(_INSTRUCTIONS_HTML, unsafe_allow_html=True)
|
|
|
|
|
|
|
|
|
|
|
# Collapsible explainer describing the model and the sampling controls.
with st.expander("\U0001F9E0 How does this work? Click to peek inside."):
    explainer = """
- This app uses **Gemma-2B**, a language model from Google DeepMind.
- You give it a prompt, and it predicts the next words one-by-one (aka causal language modeling).
- The **tone** you choose adds flavor to the prompt before it hits the model.
- Parameters like **temperature** control how wild or safe the answers are.
- The output is visualized in a **Word Cloud** so you can see which words stand out!
"""
    st.markdown(explainer)
|
|
|
|
|
|
|
|
# One-shot example buttons: a click pre-fills the form's session state and
# arms `trigger_example` so the next rerun generates without pressing
# "Generate". The flag is initialized once per session.
if "trigger_example" not in st.session_state:
    st.session_state.trigger_example = False

_EXAMPLES = [
    ("Try Funny Cat Story", "The cat hacked my WiFi", "Funny"),
    ("Try Poetic Goodbye", "As the sun set on our final day", "Poetic"),
]
for example_col, (label, example_prompt, example_tone) in zip(st.columns(2), _EXAMPLES):
    with example_col:
        if st.button(label):
            st.session_state.prompt = example_prompt
            st.session_state.tone = example_tone
            st.session_state.trigger_example = True
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Main input form: prompt, tone, sampling knobs, and length. Widget order is
# the on-screen order, so it must not change.
_TONES = ["Funny", "Serious", "Poetic"]

with st.form(key="input_form"):
    prompt = st.text_input(
        "Enter a prompt",
        placeholder="e.g., 'The future of AI is'",
        value=st.session_state.get("prompt", ""),
    )
    # Pre-select the tone an example button may have stashed in session state.
    tone = st.selectbox(
        "Tone", _TONES, index=_TONES.index(st.session_state.get("tone", "Funny"))
    )
    temperature = st.slider("Temperature (Creativity)", 0.2, 1.5, 0.7)
    top_p = st.slider("Top-p (Nucleus Sampling)", 0.1, 1.0, 0.9)
    repetition_penalty = st.slider("Repetition Penalty", 1.0, 2.0, 1.0)
    max_length = st.slider("Word count", 20, 100, 50)
    submit_button = st.form_submit_button(label="Generate")
|
|
|
|
|
|
|
|
# Run generation when the form is submitted OR an example button armed the
# one-shot trigger on the previous rerun.
if submit_button or st.session_state.trigger_example:
    # Consume the flag immediately so example clicks auto-generate only once.
    st.session_state.trigger_example = False
    if not prompt:
        st.error("Please enter a prompt!")
    else:
        with st.spinner("Generating text..."):
            output = generate_text(prompt, tone, max_length, temperature, top_p, repetition_penalty)

        # Glassy result card (inline style, independent of the global CSS).
        st.markdown(f"""
        <div style="
            background: linear-gradient(145deg, #ffffff33, #1f1f1f99);
            color: #f5f5f5;
            padding: 1rem;
            border-radius: 12px;
            box-shadow: 0 4px 15px rgba(0,0,0,0.25);
            font-family: 'Courier New', monospace;
            font-size: 1rem;
            white-space: pre-wrap;
            backdrop-filter: blur(6px);
            border: 1px solid rgba(255,255,255,0.1);
            margin-top: 1rem;
        ">
        {output}
        </div>
        """, unsafe_allow_html=True)

        # Word-frequency visualization of the generated text.
        wordcloud = WordCloud(width=400, height=200, background_color="white").generate(output)
        # FIX: build an explicit Figure instead of passing the pyplot module.
        # st.pyplot(plt) relies on deprecated matplotlib global state, and the
        # old code never closed the figure, so each rerun leaked an open
        # figure (matplotlib warns after ~20 and memory grows unbounded).
        fig, ax = plt.subplots(figsize=(8, 4))
        ax.imshow(wordcloud, interpolation="bilinear")
        ax.axis("off")
        st.pyplot(fig)
        plt.close(fig)
|
|
|
|
|
|
|
|
# Footer: credits on the left, Gemma logo on the right.
st.markdown("---")
footer_left, footer_right = st.columns([3, 1])
with footer_left:
    st.write("Built with ❤️ by Utkarsh Shukla for GSoC Proposal 2025 | Powered by (Gemma + Hugging Face) and Saiyan Pride")
    st.write("Wish me luck, 🤞")
with footer_right:
    st.image("images/gemma_logo.png", width=80, caption="Gemma by DeepMind")