# models.py
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch
from PIL import Image
from torchvision import transforms
from torchvision.models import efficientnet_b0


# -------------------
# Text models
# -------------------
class TextModels:
    """Two-model ensemble for AI-generated-text detection.

    Loads two HuggingFace sequence-classification checkpoints at construction
    time (network download on first use) and averages their class-1
    probabilities in ``predict``.
    """

    def __init__(self):
        # model 1
        self.tokenizer1 = AutoTokenizer.from_pretrained("VSAsteroid/ai-text-detector-hc3")
        self.model1 = AutoModelForSequenceClassification.from_pretrained("VSAsteroid/ai-text-detector-hc3")
        self.model1.eval()

        # model 2 (second member of the ensemble)
        self.tokenizer2 = AutoTokenizer.from_pretrained("silentone0725/text-detector-model-v2")
        self.model2 = AutoModelForSequenceClassification.from_pretrained("silentone0725/text-detector-model-v2")
        self.model2.eval()

    def _score(self, tokenizer, model, text):
        """Return P(class 1) for *text* under one tokenizer/model pair.

        Inputs longer than 512 tokens are truncated.

        NOTE(review): index 1 is assumed to be the "AI-generated" label for
        both checkpoints — confirm against each model's ``config.id2label``.
        """
        inputs = tokenizer(text, return_tensors="pt", truncation=True, max_length=512)
        with torch.no_grad():
            logits = model(**inputs).logits
        return torch.softmax(logits, dim=1)[0][1].item()

    def predict(self, text):
        """Return the mean AI-probability of the two models (simple ensemble).

        Args:
            text: the input text to classify.

        Returns:
            float in [0, 1] — average of the two models' class-1 probabilities.
        """
        prob1 = self._score(self.tokenizer1, self.model1, text)
        prob2 = self._score(self.tokenizer2, self.model2, text)
        return (prob1 + prob2) / 2


# -------------------
# Images
# -------------------
class ImageModel:
    """EfficientNet-B0 based image scorer.

    NOTE(review): this loads a stock ImageNet-pretrained classifier and then
    applies a sigmoid to the logit of ImageNet class 0 (see ``predict``) —
    that logit has no relation to AI-image detection. The inline comment says
    this is an MVP placeholder; a fine-tuned head is needed for real use.
    """

    def __init__(self):
        # NOTE(review): ``pretrained=True`` is deprecated in torchvision >= 0.13
        # in favour of ``weights=...``; kept as-is for compatibility with the
        # torchvision version this project pins — confirm and migrate.
        self.model = efficientnet_b0(pretrained=True)
        self.model.eval()

        # Standard EfficientNet/ImageNet preprocessing: 224x224, ImageNet
        # mean/std normalization.
        self.transform = transforms.Compose([
            transforms.Resize((224, 224)),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        ])

    def predict(self, image: Image.Image):
        """Return a pseudo-probability for *image*.

        Args:
            image: a PIL image (RGB expected by the ImageNet normalization).

        Returns:
            float in [0, 1] — sigmoid of the model's first output logit.
            For the MVP this uses sigmoid on a single neuron; see the class
            NOTE about the placeholder nature of this score.
        """
        batch = self.transform(image).unsqueeze(0)  # add batch dim -> (1, 3, 224, 224)
        with torch.no_grad():
            logits = self.model(batch)
        return torch.sigmoid(logits[:, 0])[0].item()