Update tasks/text.py

tasks/text.py  +58 -46  CHANGED
@@ -1,6 +1,7 @@
 from fastapi import APIRouter
 from datetime import datetime
 from datasets import load_dataset
+from torch.utils.data import DataLoader
 from sklearn.metrics import accuracy_score
 import random
 
@@ -64,71 +65,82 @@ async def evaluate_text(request: TextEvaluationRequest):
 
     # Make random predictions (placeholder for actual model inference)
     true_labels = test_dataset["label"]
-    predictions = [random.randint(0, 7) for _ in range(len(true_labels))]
+    # predictions = [random.randint(0, 7) for _ in range(len(true_labels))]
+
+    # Model and tokenizer paths
     path_model = 'MatthiasPicard/checkpoint4200_batch16_modern_bert_valloss_0.79_0.74acc'
     path_tokenizer = "answerdotai/ModernBERT-base"
 
-    model = AutoModelForSequenceClassification.from_pretrained(path_model)
+    # GPU detection
+    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+    # Load the model and tokenizer
+    model = AutoModelForSequenceClassification.from_pretrained(path_model).half().to(device)  # Model in half precision on the GPU
     tokenizer = AutoTokenizer.from_pretrained(path_tokenizer)
 
+    # Preprocessing function
     def preprocess_function(df):
-        return tokenizer(df["quote"], truncation=True)
+        tokenized = tokenizer(df["quote"], truncation=True)  # Removed padding here
+        return tokenized
+
+    # Apply the preprocessing
     tokenized_test = test_dataset.map(preprocess_function, batched=True)
-
-    # training_args = torch.load("training_args.bin")
-    # training_args.eval_strategy='no'
-
-    model = model.half()
-    model.eval()
 
-    data_collator = DataCollatorWithPadding(tokenizer)
+    # Convert the dataset to PyTorch format
+    tokenized_test.set_format(type="torch", columns=["input_ids", "attention_mask"])
+    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
+
+    # Create the DataLoader with a batch_size > 1 to make better use of the GPU
+    batch_size = 4  # Adjust to the memory available on the GPU
+    test_loader = DataLoader(tokenized_test, batch_size=batch_size, collate_fn=data_collator)
+
+    model = model.half()
+    model.eval()
 
-    trainer = Trainer(
-        model=model,
-        # args=training_args,
-        tokenizer=tokenizer,
-        data_collator=data_collator
-    )
+    # Inference on the GPU
+    predictions = []
+    with torch.no_grad():
+        for batch in test_loader:
+            input_ids = batch['input_ids'].to(device)
+            attention_mask = batch['attention_mask'].to(device)
 
-    trainer.args.per_device_eval_batch_size = 16
+            outputs = model(input_ids=input_ids, attention_mask=attention_mask)
+            logits = outputs.logits
+            preds = torch.argmax(logits, dim=-1)
 
-    preds = trainer.predict(tokenized_test)
+            predictions.extend(preds.cpu().numpy())  # Move back to the CPU for storage
+
 
-    # path_model = '
+    # path_model = 'MatthiasPicard/checkpoint4200_batch16_modern_bert_valloss_0.79_0.74acc'
     # path_tokenizer = "answerdotai/ModernBERT-base"
 
-    #
-    # model = AutoModelForSequenceClassification.from_pretrained(path_model).to(device).eval()
+    # model = AutoModelForSequenceClassification.from_pretrained(path_model)
    # tokenizer = AutoTokenizer.from_pretrained(path_tokenizer)
-
-    # model.half()
 
-    # # Use optimized tokenization
     # def preprocess_function(df):
-    #     return tokenizer(df["quote"], truncation=True
-
+    #     return tokenizer(df["quote"], truncation=True)
     # tokenized_test = test_dataset.map(preprocess_function, batched=True)
+
+    # # training_args = torch.load("training_args.bin")
+    # # training_args.eval_strategy='no'
+
+    # model = model.half()
+    # model.eval()
+
+    # data_collator = DataCollatorWithPadding(tokenizer)
+
+    # trainer = Trainer(
+    #     model=model,
+    #     # args=training_args,
+    #     tokenizer=tokenizer,
+    #     data_collator=data_collator
+    # )
+
+    # trainer.args.per_device_eval_batch_size = 16
+
+    # preds = trainer.predict(tokenized_test)
 
-    #
-    # def collate_fn(batch):
-    #     input_ids = torch.tensor([example["input_ids"] for example in batch]).to(device)
-    #     attention_mask = torch.tensor([example["attention_mask"] for example in batch]).to(device)
-    #     return {"input_ids": input_ids, "attention_mask": attention_mask}
-
-    # Optimized inference function
-    # def predict(dataset, batch_size=16):
-    #     all_preds = []
-    #     with torch.no_grad():  # No gradient computation (saves energy)
-    #         for batch in torch.utils.data.DataLoader(dataset, batch_size=batch_size, collate_fn=collate_fn):
-    #             outputs = model(**batch)
-    #             preds = torch.argmax(outputs.logits, dim=-1).cpu().numpy()
-    #             all_preds.extend(preds)
-    #     return np.array(all_preds)
-
-    # Run inference
-    # predictions = predict(tokenized_test)
-    # print(predictions)
-    predictions = np.array([np.argmax(x) for x in preds[0]])
+    # predictions = np.array([np.argmax(x) for x in preds[0]])
 
     #--------------------------------------------------------------------------------------------
     # YOUR MODEL INFERENCE STOPS HERE
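For readers who want to exercise the new inference path outside the FastAPI route, here is a minimal standalone sketch of what the commit sets up. It is an approximation, not the file itself: the hunk above only adds the DataLoader import, so torch and the transformers classes (AutoTokenizer, AutoModelForSequenceClassification, DataCollatorWithPadding) are assumed to be imported elsewhere in tasks/text.py, and the two-row Dataset below is a hypothetical stand-in for the real test split with its "quote" column.

import torch
from datasets import Dataset
from torch.utils.data import DataLoader
from transformers import (
    AutoModelForSequenceClassification,
    AutoTokenizer,
    DataCollatorWithPadding,
)

path_model = "MatthiasPicard/checkpoint4200_batch16_modern_bert_valloss_0.79_0.74acc"
path_tokenizer = "answerdotai/ModernBERT-base"

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load the checkpoint; cast to fp16 only on GPU, since many CPU kernels lack float16 support.
model = AutoModelForSequenceClassification.from_pretrained(path_model).to(device)
if device.type == "cuda":
    model = model.half()
model.eval()

tokenizer = AutoTokenizer.from_pretrained(path_tokenizer)

# Hypothetical stand-in for the real test split, which provides a "quote" text column.
test_dataset = Dataset.from_dict({"quote": ["First example quote.", "Second example quote."]})

def preprocess_function(df):
    # No padding here: the collator pads each batch to its own longest sequence.
    return tokenizer(df["quote"], truncation=True)

tokenized_test = test_dataset.map(preprocess_function, batched=True)
tokenized_test.set_format(type="torch", columns=["input_ids", "attention_mask"])

data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
test_loader = DataLoader(tokenized_test, batch_size=4, collate_fn=data_collator)

predictions = []
with torch.no_grad():  # inference only, no gradients needed
    for batch in test_loader:
        batch = {k: v.to(device) for k, v in batch.items()}
        logits = model(**batch).logits
        predictions.extend(logits.argmax(dim=-1).cpu().numpy())

print(predictions)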
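Two aspects of the change are worth noting. Tokenizing with truncation=True and no padding, then padding per batch through DataCollatorWithPadding, means each batch is padded only to its own longest sequence rather than to a global maximum length, which is what the "Removed padding here" comment refers to. And because the model is already loaded with .half().to(device), the later model = model.half() call is redundant, though harmless.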