Spaces:
Paused
Paused
Update app.py
Browse files
app.py
CHANGED
|
@@ -11,15 +11,17 @@ model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float
|
|
| 11 |
def generate_response(prompt):
|
| 12 |
system_prompt = "Responde solo con el texto solicitado, sin información personal ni datos irrelevantes."
|
| 13 |
|
| 14 |
-
inputs = tokenizer(
|
|
|
|
|
|
|
|
|
|
| 15 |
|
| 16 |
-
|
| 17 |
outputs = model.generate(
|
| 18 |
inputs.input_ids,
|
| 19 |
-
max_length=50, # 🔹
|
| 20 |
do_sample=True,
|
| 21 |
-
temperature=0.5, # 🔹
|
| 22 |
-
top_p=0.85, # 🔹
|
| 23 |
repetition_penalty=1.2, # 🔹 Evita respuestas repetitivas
|
| 24 |
early_stopping=True, # 🔹 Detiene la respuesta si ya está completa
|
| 25 |
)
|
|
@@ -35,3 +37,4 @@ with gr.Blocks() as demo:
|
|
| 35 |
submit_button.click(generate_response, inputs=input_text, outputs=output_text)
|
| 36 |
|
| 37 |
demo.launch()
|
|
|
|
|
|
| 11 |
def generate_response(prompt):
|
| 12 |
system_prompt = "Responde solo con el texto solicitado, sin información personal ni datos irrelevantes."
|
| 13 |
|
| 14 |
+
inputs = tokenizer(
|
| 15 |
+
f"Instrucción: {system_prompt} \n Pregunta: {prompt} \n Respuesta directa:",
|
| 16 |
+
return_tensors="pt"
|
| 17 |
+
)
|
| 18 |
|
|
|
|
| 19 |
outputs = model.generate(
|
| 20 |
inputs.input_ids,
|
| 21 |
+
max_length=50, # 🔹 Limita la respuesta a 50 tokens
|
| 22 |
do_sample=True,
|
| 23 |
+
temperature=0.5, # 🔹 Menos aleatoriedad, más precisión
|
| 24 |
+
top_p=0.85, # 🔹 Más controlado
|
| 25 |
repetition_penalty=1.2, # 🔹 Evita respuestas repetitivas
|
| 26 |
early_stopping=True, # 🔹 Detiene la respuesta si ya está completa
|
| 27 |
)
|
|
|
|
| 37 |
submit_button.click(generate_response, inputs=input_text, outputs=output_text)
|
| 38 |
|
| 39 |
demo.launch()
|
| 40 |
+
|