Removed `async` from `llama_generation`
Browse files
app.py
CHANGED
|
@@ -80,7 +80,7 @@ def gpt_generation(input: str,
|
|
| 80 |
|
| 81 |
# Place just input pass and return generation output
|
| 82 |
@spaces.GPU(duration=120)
|
| 83 |
- async def llama_generation(input_text: str,
|
| 84 |
history: list,
|
| 85 |
temperature: float,
|
| 86 |
max_new_tokens: int):
|
|
|
|
| 80 |
|
| 81 |
# Place just input pass and return generation output
|
| 82 |
@spaces.GPU(duration=120)
|
| 83 |
+
def llama_generation(input_text: str,
|
| 84 |
history: list,
|
| 85 |
temperature: float,
|
| 86 |
max_new_tokens: int):
|