Update interface.py
Browse files- interface.py +2 -1
interface.py
CHANGED
|
@@ -11,6 +11,7 @@ from transformers import AutoTokenizer, AutoModelForCausalLM
|
|
| 11 |
from sympy import symbols, sympify, lambdify
|
| 12 |
import copy
|
| 13 |
from config import DEVICE, MODEL_PATH, MAX_LENGTH, TEMPERATURE
|
|
|
|
| 14 |
|
| 15 |
# Configuración del dispositivo
|
| 16 |
device = DEVICE
|
|
@@ -22,7 +23,7 @@ model = AutoModelForCausalLM.from_pretrained(model_path)
|
|
| 22 |
model.to(device)
|
| 23 |
model.eval()
|
| 24 |
|
| 25 |
-
@
|
| 26 |
def generate_analysis(prompt, max_length=MAX_LENGTH):
|
| 27 |
try:
|
| 28 |
input_ids = tokenizer.encode(prompt, return_tensors='pt').to(device)
|
|
|
|
| 11 |
from sympy import symbols, sympify, lambdify
|
| 12 |
import copy
|
| 13 |
from config import DEVICE, MODEL_PATH, MAX_LENGTH, TEMPERATURE
|
| 14 |
+
from decorators import spaces # Importamos el decorador
|
| 15 |
|
| 16 |
# Configuración del dispositivo
|
| 17 |
device = DEVICE
|
|
|
|
| 23 |
model.to(device)
|
| 24 |
model.eval()
|
| 25 |
|
| 26 |
+
@spaces.GPU(duration=300) # Aplicamos el decorador para asegurar uso de GPU
|
| 27 |
def generate_analysis(prompt, max_length=MAX_LENGTH):
|
| 28 |
try:
|
| 29 |
input_ids = tokenizer.encode(prompt, return_tensors='pt').to(device)
|