Update app.py
Browse files
app.py
CHANGED
|
@@ -1,6 +1,6 @@
|
|
| 1 |
import gradio as gr
|
| 2 |
# BE EXPLICIT: Import the specific model class we need
|
| 3 |
-
from transformers import AutoTokenizer,
|
| 4 |
import torch
|
| 5 |
import os
|
| 6 |
|
|
@@ -23,7 +23,8 @@ try:
|
|
| 23 |
# THE FIX: Use the explicit class instead of AutoModelForSequenceClassification.
|
| 24 |
# This ignores the problematic 'auto_map' in config.json and forces the
|
| 25 |
# use of the standard XLM-RoBERTa architecture for sequence classification.
|
| 26 |
-
model =
|
|
|
|
| 27 |
|
| 28 |
# Move the model to the selected device
|
| 29 |
model.to(device)
|
|
|
|
| 1 |
import gradio as gr
|
| 2 |
# BE EXPLICIT: Import the specific model class we need
|
| 3 |
+
from transformers import AutoTokenizer, AutoModelForSequenceClassification
|
| 4 |
import torch
|
| 5 |
import os
|
| 6 |
|
|
|
|
| 23 |
# THE FIX: Load via AutoModelForSequenceClassification with trust_remote_code=True.
|
| 24 |
# This allows the custom code referenced by 'auto_map' in config.json to be
|
| 25 |
# executed, so the repository's own architecture is used for classification.
|
| 26 |
+
model = AutoModelForSequenceClassification.from_pretrained("Lajavaness/bilingual-embedding-base", token=HF_TOKEN, trust_remote_code=True)
|
| 27 |
+
model = AutoModelForSequenceClassification.from_pretrained(MODEL_ID, token=HF_TOKEN, trust_remote_code=True)
|
| 28 |
|
| 29 |
# Move the model to the selected device
|
| 30 |
model.to(device)
|