Update app.py
app.py CHANGED
@@ -10,7 +10,23 @@ model.eval()
 def predict(input_text):
     # Your preprocessing and prediction code
     # output = model(input_text)
-
+    tokens = tokenize(input_text)  # Define this function based on your tokenizer
+    input_tensor = torch.tensor(tokens).unsqueeze(0)  # Add batch dimension
 
+    with torch.no_grad():
+        output = model(input_tensor)
+
+    # Postprocess output
+    predicted_tokens = torch.argmax(output, dim=-1).squeeze().tolist()
+    predicted_text = detokenize(predicted_tokens)  # Define this function based on your tokenizer
+
+    return predicted_text  # Replace with actual prediction logic
+
+def tokenize(text):
+    # Example tokenizer function
+    return [ord(char) for char in text]  # Replace with actual tokenizer
+
+def detokenize(tokens):
+    # Example detokenizer function
+    return ''.join([chr(token) for token in tokens])  # Replace with actual detokenizer
 interface = gr.Interface(fn=predict, inputs="text", outputs="text")
 interface.launch()
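The tokenize/detokenize helpers added by this commit are explicit stand-ins (their comments say "Replace with actual tokenizer"). A minimal sketch of what that swap could look like, assuming the Space wraps a Hugging Face transformers checkpoint; the MODEL_ID value, the seq2seq model class, and the generate() settings below are assumptions, not part of this commit:

import torch
import gradio as gr
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

# Hypothetical checkpoint; any seq2seq model with a matching tokenizer would do.
MODEL_ID = "t5-small"
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_ID)
model.eval()

def predict(input_text):
    # The tokenizer replaces the character-level tokenize(): ids, attention mask, batch dim
    inputs = tokenizer(input_text, return_tensors="pt")
    with torch.no_grad():
        output_ids = model.generate(**inputs, max_new_tokens=64)
    # decode() replaces detokenize(): token ids back to text
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)

interface = gr.Interface(fn=predict, inputs="text", outputs="text")
interface.launch()

With a real tokenizer, both the preprocessing and the argmax/decode postprocessing collapse into library calls, so the two placeholder helpers can be dropped entirely.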