Phew — forgot 3 commas and the system crashed. My bad. Maybe this will be the final commit to this prototype
Browse files
app.py
CHANGED
|
@@ -82,7 +82,7 @@ def generate_text(prompt, tone, max_length, temperature=0.7, top_p=0.9, repetiti
|
|
| 82 |
# max_length=max_length + len(input_text.split()),
|
| 83 |
# This sets how long the generated text can be. We add the number of words in our input text (len(input_text.split())) to the max_length the user picked, so the model knows how many total words to create.
|
| 84 |
# CHANGE: Use max_new_tokens for clarity instead of calculating total length
|
| 85 |
-
max_new_tokens=max_length
|
| 86 |
# Generate THIS many NEW tokens
|
| 87 |
temperature=temperature,
|
| 88 |
# This controls how creative the model gets. A lower temperature (e.g., 0.7) keeps things more predictable, while a higher one makes it wilder and more random—think of it like adjusting the spice level!
|
|
@@ -92,7 +92,7 @@ def generate_text(prompt, tone, max_length, temperature=0.7, top_p=0.9, repetiti
|
|
| 92 |
# This stops the model from repeating the same words too much. A higher value (e.g., 1.5) pushes it to try new words, like telling it to mix up its vocabulary!
|
| 93 |
num_return_sequences=1,
|
| 94 |
# This tells the model to give us just one version of the text. If we wanted more options, we could change num_return_sequences to a higher number.
|
| 95 |
-
do_sample=True
|
| 96 |
pad_token_id=tokenizer.eos_token_id # Good practice for generation
|
| 97 |
)
|
| 98 |
# --- Decode ONLY the generated part ---
|
|
|
|
| 82 |
# max_length=max_length + len(input_text.split()),
|
| 83 |
# This sets how long the generated text can be. We add the number of words in our input text (len(input_text.split())) to the max_length the user picked, so the model knows how many total words to create.
|
| 84 |
# CHANGE: Use max_new_tokens for clarity instead of calculating total length
|
| 85 |
+
max_new_tokens=max_length,
|
| 86 |
# Generate THIS many NEW tokens
|
| 87 |
temperature=temperature,
|
| 88 |
# This controls how creative the model gets. A lower temperature (e.g., 0.7) keeps things more predictable, while a higher one makes it wilder and more random—think of it like adjusting the spice level!
|
|
|
|
| 92 |
# This stops the model from repeating the same words too much. A higher value (e.g., 1.5) pushes it to try new words, like telling it to mix up its vocabulary!
|
| 93 |
num_return_sequences=1,
|
| 94 |
# This tells the model to give us just one version of the text. If we wanted more options, we could change num_return_sequences to a higher number.
|
| 95 |
+
do_sample=True,
|
| 96 |
pad_token_id=tokenizer.eos_token_id # Good practice for generation
|
| 97 |
)
|
| 98 |
# --- Decode ONLY the generated part ---
|