Spaces:
Sleeping
Sleeping
Update src/components/ai_core_async_methods.py
Browse files
src/components/ai_core_async_methods.py
CHANGED
|
@@ -149,7 +149,7 @@ def _generate_model_response(self, prompt: str) -> str:
 149             return_tensors="pt",
 150             padding=True,
 151             truncation=True,
 152 -           max_length=512
 153         )
 154
 155         # Move to GPU if available
|
@@ -159,13 +159,13 @@ def _generate_model_response(self, prompt: str) -> str:
 159         # Set generation config for balanced, natural responses
 160         from transformers import GenerationConfig
 161         generation_config = GenerationConfig(
 162 -           max_length=512
 163             num_return_sequences=1,
 164             no_repeat_ngram_size=3,
 165             do_sample=True,
 166             pad_token_id=self.tokenizer.eos_token_id,
 167             repetition_penalty=1.3,
 168 -           min_length=20
 169             eos_token_id=self.tokenizer.eos_token_id
 170         )
 171         self.model.generation_config = generation_config
|
|
|
 149             return_tensors="pt",
 150             padding=True,
 151             truncation=True,
 152 +           max_length=1024  # Increased from 512 to allow longer prompts
 153         )
 154
 155         # Move to GPU if available
|
|
 159         # Set generation config for balanced, natural responses
 160         from transformers import GenerationConfig
 161         generation_config = GenerationConfig(
 162 +           max_length=1024,  # Increased from 512 for longer responses
 163             num_return_sequences=1,
 164             no_repeat_ngram_size=3,
 165             do_sample=True,
 166             pad_token_id=self.tokenizer.eos_token_id,
 167             repetition_penalty=1.3,
 168 +           min_length=50,  # Increased from 20 to ensure meaningful responses
 169             eos_token_id=self.tokenizer.eos_token_id
 170         )
 171         self.model.generation_config = generation_config