Added the final comments to complete the tutorial, keeping in mind that my previous beginner self would be following the code to learn.
Browse files
app.py
CHANGED
|
@@ -57,6 +57,7 @@ def load_model_and_tokenizer():
|
|
| 57 |
|
| 58 |
|
| 59 |
# Function to generate text with Gemma
|
|
|
|
| 60 |
def generate_text(prompt, tone, max_length, temperature=0.7, top_p=0.9, repetition_penalty=1.0):
|
| 61 |
tokenizer, model = load_model_and_tokenizer()
|
| 62 |
# Adjust prompt based on tone
|
|
@@ -66,20 +67,29 @@ def generate_text(prompt, tone, max_length, temperature=0.7, top_p=0.9, repetiti
|
|
| 66 |
"Serious": f"Provide a detailed, thoughtful, and professional response to: {prompt}. Offer logical reasoning, depth, and a formal tone, as if explaining to an expert audience. For example, if the prompt is 'The future of AI,' discuss ethical implications and technical challenges in 2-3 sentences.",
|
| 67 |
"Poetic": f"Write a vivid, poetic response to: {prompt}. Use metaphor, rhythm, and imagery to create a lyrical flow, as if crafting a short verse. For instance, if the prompt is 'The river flows,' respond with 'The river flows, a silver thread, / Through emerald dreams where silence tread.'"
|
| 68 |
}
|
|
|
|
| 69 |
input_text = tone_prompts.get(tone, prompt)
|
| 70 |
-
|
| 71 |
inputs = tokenizer(input_text, return_tensors="pt")
|
|
|
|
| 72 |
outputs = model.generate(
|
| 73 |
inputs["input_ids"],
|
| 74 |
max_length=max_length + len(input_text.split()),
|
|
|
|
| 75 |
temperature=temperature,
|
|
|
|
| 76 |
top_p=top_p,
|
|
|
|
| 77 |
repetition_penalty=repetition_penalty,
|
|
|
|
| 78 |
num_return_sequences=1,
|
|
|
|
| 79 |
do_sample=True
|
| 80 |
)
|
|
|
|
| 81 |
return tokenizer.decode(outputs[0], skip_special_tokens=True)
|
| 82 |
-
|
|
|
|
| 83 |
# Clean and Solid UI for our Project, keeping the blue theme of gemini.
|
| 84 |
# We will continue tutorial after this st.markdown()
|
| 85 |
st.markdown("""
|
|
@@ -335,8 +345,12 @@ if "trigger_example" not in st.session_state:
|
|
| 335 |
with col1:
|
| 336 |
if st.button("✨ Funny Cat Story"):
|
| 337 |
st.session_state.prompt = "The cat hacked my WiFi"
|
|
|
|
| 338 |
st.session_state.tone = "Funny"
|
|
|
|
| 339 |
st.session_state.trigger_example = True
|
|
|
|
|
|
|
| 340 |
with col2:
|
| 341 |
if st.button("🌅 Poetic Goodbye"):
|
| 342 |
st.session_state.prompt = "As the sun set on our final day"
|
|
@@ -350,15 +364,19 @@ with col3:
|
|
| 350 |
|
| 351 |
# Clean form with better spacing
|
| 352 |
with st.form(key="input_form"):
|
|
|
|
| 353 |
st.markdown('<div style="margin-bottom: 15px;"><h3 style="color: #4A5BEA; margin-bottom: 10px;">Generate Your Text</h3></div>', unsafe_allow_html=True)
|
| 354 |
-
|
| 355 |
prompt = st.text_input("Enter a prompt", placeholder="e.g., 'The future of AI is'", value=st.session_state.get("prompt", ""))
|
| 356 |
-
|
| 357 |
col1, col2 = st.columns(2)
|
|
|
|
| 358 |
with col1:
|
| 359 |
tone = st.selectbox("Tone", ["Funny", "Serious", "Poetic"], index=["Funny", "Serious", "Poetic"].index(st.session_state.get("tone", "Funny")))
|
| 360 |
with col2:
|
| 361 |
max_length = st.slider("Word count", 20, 100, 50)
|
|
|
|
|
|
|
| 362 |
|
| 363 |
st.markdown('<div class="parameter-card"><h4>Advanced Parameters</h4>', unsafe_allow_html=True)
|
| 364 |
|
|
@@ -394,11 +412,15 @@ with st.form(key="input_form"):
|
|
| 394 |
|
| 395 |
# Generate and display output
|
| 396 |
if submit_button or st.session_state.trigger_example:
|
|
|
|
| 397 |
st.session_state.trigger_example = False
|
|
|
|
| 398 |
if not prompt:
|
| 399 |
st.error("Please enter a prompt!")
|
|
|
|
| 400 |
else:
|
| 401 |
with st.spinner("Generating text..."):
|
|
|
|
| 402 |
output = generate_text(prompt, tone, max_length, temperature, top_p, repetition_penalty)
|
| 403 |
|
| 404 |
# Display metadata about the generation with improved value styling
|
|
@@ -419,6 +441,7 @@ if submit_button or st.session_state.trigger_example:
|
|
| 419 |
# WordCloud visualization
|
| 420 |
st.markdown('<div class="wordcloud-container">', unsafe_allow_html=True)
|
| 421 |
st.markdown('<h4 style="color: #4A5BEA; margin-top: 0;">Word Cloud Visualization</h4>', unsafe_allow_html=True)
|
|
|
|
| 422 |
|
| 423 |
# Generate a clean wordcloud
|
| 424 |
wordcloud = WordCloud(
|
|
@@ -445,4 +468,7 @@ st.markdown("""
|
|
| 445 |
</p>
|
| 446 |
<p style="margin-top: 10px;">Wish me luck! 🤞</p>
|
| 447 |
</div>
|
| 448 |
-
""", unsafe_allow_html=True)
|
|
|
|
|
|
|
|
|
|
|
|
| 57 |
|
| 58 |
|
| 59 |
# Function to generate text with Gemma
|
| 60 |
+
# The function below first calls load_model_and_tokenizer() (which we just wrote) to get the tokenizer and model ready to work.
|
| 61 |
def generate_text(prompt, tone, max_length, temperature=0.7, top_p=0.9, repetition_penalty=1.0):
|
| 62 |
tokenizer, model = load_model_and_tokenizer()
|
| 63 |
# Adjust prompt based on tone
|
|
|
|
| 67 |
"Serious": f"Provide a detailed, thoughtful, and professional response to: {prompt}. Offer logical reasoning, depth, and a formal tone, as if explaining to an expert audience. For example, if the prompt is 'The future of AI,' discuss ethical implications and technical challenges in 2-3 sentences.",
|
| 68 |
"Poetic": f"Write a vivid, poetic response to: {prompt}. Use metaphor, rhythm, and imagery to create a lyrical flow, as if crafting a short verse. For instance, if the prompt is 'The river flows,' respond with 'The river flows, a silver thread, / Through emerald dreams where silence tread.'"
|
| 69 |
}
|
| 70 |
+
# This creates a dictionary that holds different prompts based on the tone we pick, making sure the model knows how to respond.
|
| 71 |
input_text = tone_prompts.get(tone, prompt)
|
| 72 |
+
# This picks the right instruction from the dictionary based on the tone.
|
| 73 |
inputs = tokenizer(input_text, return_tensors="pt")
|
| 74 |
+
# This turns our input text (with the tone instruction) into a format (tensors) that the model can process using the tokenizer.
|
| 75 |
outputs = model.generate(
|
| 76 |
inputs["input_ids"],
|
| 77 |
max_length=max_length + len(input_text.split()),
|
| 78 |
+
# This sets how long the generated text can be. NOTE: max_length actually counts tokens, not words, so adding len(input_text.split()) (the word count of our input) is only a rough approximation of the prompt's length.
|
| 79 |
temperature=temperature,
|
| 80 |
+
# This controls how creative the model gets. A lower temperature (e.g., 0.7) keeps things more predictable, while a higher one makes it wilder and more random—think of it like adjusting the spice level!
|
| 81 |
top_p=top_p,
|
| 82 |
+
# This is like a filter for word choices. It picks from the top percentage of likely words (e.g., 0.9 means 90% of the best options), making the output diverse but not too crazy.
|
| 83 |
repetition_penalty=repetition_penalty,
|
| 84 |
+
# This stops the model from repeating the same words too much. A higher value (e.g., 1.5) pushes it to try new words, like telling it to mix up its vocabulary!
|
| 85 |
num_return_sequences=1,
|
| 86 |
+
# This tells the model to give us just one version of the text. If we wanted more options, we could raise num_return_sequences.
|
| 87 |
do_sample=True
|
| 88 |
)
|
| 89 |
+
# The generate() call above used the input IDs, the max length, and the creativity settings (temperature, top_p, repetition_penalty) to produce the output tokens.
|
| 90 |
return tokenizer.decode(outputs[0], skip_special_tokens=True)
|
| 91 |
+
# This decodes the model's output tokens back into readable text, skipping any special tokens we don't need.
|
| 92 |
+
|
| 93 |
# Clean and Solid UI for our Project, keeping the blue theme of gemini.
|
| 94 |
# We will continue tutorial after this st.markdown()
|
| 95 |
st.markdown("""
|
|
|
|
| 345 |
with col1:
|
| 346 |
if st.button("✨ Funny Cat Story"):
|
| 347 |
st.session_state.prompt = "The cat hacked my WiFi"
|
| 348 |
+
# This sets the prompt to a fun example when the button is clicked
|
| 349 |
st.session_state.tone = "Funny"
|
| 350 |
+
# This sets the tone to Funny for the example.
|
| 351 |
st.session_state.trigger_example = True
|
| 352 |
+
# This turns on the trigger to automatically generate the example.
|
| 353 |
+
# The same idea applies to the Poetic and Serious tone buttons below. Phew — stay awake, just a little more!
|
| 354 |
with col2:
|
| 355 |
if st.button("🌅 Poetic Goodbye"):
|
| 356 |
st.session_state.prompt = "As the sun set on our final day"
|
|
|
|
| 364 |
|
| 365 |
# Clean form with better spacing
|
| 366 |
with st.form(key="input_form"):
|
| 367 |
+
# This starts a form where users can input their own prompt and settings
|
| 368 |
st.markdown('<div style="margin-bottom: 15px;"><h3 style="color: #4A5BEA; margin-bottom: 10px;">Generate Your Text</h3></div>', unsafe_allow_html=True)
|
| 369 |
+
# This adds a styled heading to label the form section.
|
| 370 |
prompt = st.text_input("Enter a prompt", placeholder="e.g., 'The future of AI is'", value=st.session_state.get("prompt", ""))
|
| 371 |
+
# This creates a text box where users can type their prompt, with a placeholder hint and a default value from our example if set.
|
| 372 |
col1, col2 = st.columns(2)
|
| 373 |
+
# This splits the next part into two columns for a better layout. Yep, Saiyans like organised layouts.
|
| 374 |
with col1:
|
| 375 |
tone = st.selectbox("Tone", ["Funny", "Serious", "Poetic"], index=["Funny", "Serious", "Poetic"].index(st.session_state.get("tone", "Funny")))
|
| 376 |
with col2:
|
| 377 |
max_length = st.slider("Word count", 20, 100, 50)
|
| 378 |
+
# This adds a slider for users to set how many words they want in the output, ranging from 20 to 100 with a default of 50.
|
| 379 |
+
# Every other slider here works the same way.
|
| 380 |
|
| 381 |
st.markdown('<div class="parameter-card"><h4>Advanced Parameters</h4>', unsafe_allow_html=True)
|
| 382 |
|
|
|
|
| 412 |
|
| 413 |
# Generate and display output
|
| 414 |
if submit_button or st.session_state.trigger_example:
|
| 415 |
+
# This checks if the Generate button was clicked or our predefined one click example was triggered.
|
| 416 |
st.session_state.trigger_example = False
|
| 417 |
+
# This resets the example trigger so it doesn’t keep running.
|
| 418 |
if not prompt:
|
| 419 |
st.error("Please enter a prompt!")
|
| 420 |
+
# If the user thought clicking Generate without entering a prompt would be fun — nah buddy, we stop them with an error :)
|
| 421 |
else:
|
| 422 |
with st.spinner("Generating text..."):
|
| 423 |
+
# This shows a spinning icon while the text is being created.
|
| 424 |
output = generate_text(prompt, tone, max_length, temperature, top_p, repetition_penalty)
|
| 425 |
|
| 426 |
# Display metadata about the generation with improved value styling
|
|
|
|
| 441 |
# WordCloud visualization
|
| 442 |
st.markdown('<div class="wordcloud-container">', unsafe_allow_html=True)
|
| 443 |
st.markdown('<h4 style="color: #4A5BEA; margin-top: 0;">Word Cloud Visualization</h4>', unsafe_allow_html=True)
|
| 444 |
+
# This starts a container for the word cloud visualization.
|
| 445 |
|
| 446 |
# Generate a clean wordcloud
|
| 447 |
wordcloud = WordCloud(
|
|
|
|
| 468 |
</p>
|
| 469 |
<p style="margin-top: 10px;">Wish me luck! 🤞</p>
|
| 470 |
</div>
|
| 471 |
+
""", unsafe_allow_html=True)
|
| 472 |
+
|
| 473 |
+
# And that's it — we're done! I hope you like my attempt. I really enjoyed this project; the Google Gen AI + Kaggle workshop helped a lot. Thanks for your time!
|
| 474 |
+
# Please let me know what improvements I should make — I'd appreciate any reviews.
|