Update README.md
Browse files
README.md
CHANGED
|
@@ -71,17 +71,17 @@ model.eval()
|
|
| 71 |
# Based on the inference code by `tloen/alpaca-lora`
|
| 72 |
def generate_prompt(instruction, input=None):
|
| 73 |
if input:
|
| 74 |
-
return f"""
|
| 75 |
-
###
|
| 76 |
{instruction}
|
| 77 |
-
###
|
| 78 |
{input}
|
| 79 |
-
###
|
| 80 |
else:
|
| 81 |
-
return f"""
|
| 82 |
-
###
|
| 83 |
{instruction}
|
| 84 |
-
###
|
| 85 |
|
| 86 |
def generate(
|
| 87 |
instruction,
|
|
@@ -112,10 +112,10 @@ def generate(
|
|
| 112 |
)
|
| 113 |
s = generation_output.sequences[0]
|
| 114 |
output = tokenizer.decode(s)
|
| 115 |
-
return output.split("### Response:")[1]
|
| 116 |
|
| 117 |
instruction = "¿Qué es un chivo?"
|
| 118 |
|
| 119 |
-
print("
|
| 120 |
-
print("
|
| 121 |
```
|
|
|
|
| 71 |
# Based on the inference code by `tloen/alpaca-lora`
|
| 72 |
def generate_prompt(instruction, input=None):
|
| 73 |
if input:
|
| 74 |
+
return f"""A continuación se muestra una instrucción que describe una tarea, emparejada con una entrada que proporciona más contexto. Escribe una respuesta que complete adecuadamente la petición.
|
| 75 |
+
### Instrucción:
|
| 76 |
{instruction}
|
| 77 |
+
### Entrada:
|
| 78 |
{input}
|
| 79 |
+
### Respuesta:"""
|
| 80 |
else:
|
| 81 |
+
return f"""A continuación se muestra una instrucción que describe una tarea. Escribe una respuesta que complete adecuadamente la petición.
|
| 82 |
+
### Instrucción:
|
| 83 |
{instruction}
|
| 84 |
+
### Respuesta:"""
|
| 85 |
|
| 86 |
def generate(
|
| 87 |
instruction,
|
|
|
|
| 112 |
)
|
| 113 |
s = generation_output.sequences[0]
|
| 114 |
output = tokenizer.decode(s)
|
| 115 |
+
return output.split("### Respuesta:")[1]
|
| 116 |
|
| 117 |
instruction = "¿Qué es un chivo?"
|
| 118 |
|
| 119 |
+
print("Instrucción:", instruction)
|
| 120 |
+
print("Respuesta:", generate(instruction))
|
| 121 |
```
|