Spaces:
Running
Running
Ranam Hamoud
committed on
Commit
·
8528e25
1
Parent(s):
1703de7
Fix: use torch_dtype instead of device_map to avoid accelerate dependency
Browse files
- plagiarism_detection.py +4 -3
plagiarism_detection.py
CHANGED
|
@@ -80,13 +80,14 @@ def ai_plagiarism_detection(text, threshold=0.5, show_results=False):
|
|
| 80 |
model_directory = "desklib/ai-text-detector-v1.01"
|
| 81 |
# Set up device
|
| 82 |
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
| 83 |
-
# Load tokenizer and model
|
| 84 |
tokenizer = AutoTokenizer.from_pretrained(model_directory)
|
|
|
|
| 85 |
model = DesklibAIDetectionModel.from_pretrained(
|
| 86 |
model_directory,
|
| 87 |
-
|
| 88 |
-
low_cpu_mem_usage=False
|
| 89 |
)
|
|
|
|
| 90 |
# Predict
|
| 91 |
probability, ai_detected = predict_single_text(text, model, tokenizer, device, threshold=threshold)
|
| 92 |
# to print results
|
|
|
|
| 80 |
model_directory = "desklib/ai-text-detector-v1.01"
|
| 81 |
# Set up device
|
| 82 |
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
| 83 |
+
# Load tokenizer and model
|
| 84 |
tokenizer = AutoTokenizer.from_pretrained(model_directory)
|
| 85 |
+
# Load model to CPU first, then move to device (avoids meta tensor issues)
|
| 86 |
model = DesklibAIDetectionModel.from_pretrained(
|
| 87 |
model_directory,
|
| 88 |
+
torch_dtype=torch.float32
|
|
|
|
| 89 |
)
|
| 90 |
+
model = model.to(device)
|
| 91 |
# Predict
|
| 92 |
probability, ai_detected = predict_single_text(text, model, tokenizer, device, threshold=threshold)
|
| 93 |
# to print results
|