import gradio as gr
import torch
import torch.nn.functional as F
from transformers import AutoTokenizer

print("libraries loaded -------------------------------------------------")

# Swedish BERT tokenizer from the National Library of Sweden (KB).
tokenizer = AutoTokenizer.from_pretrained('KB/bert-base-swedish-cased', model_max_length=128)

# Use the GPU when available, otherwise fall back to the CPU.
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")

# Load the two fine-tuned classifiers: one flags inappropriate text, the other
# flags confidential text. torch.load() works here because the files contain
# fully pickled model objects (the code below calls them directly).
# Move both models to the selected device and switch to inference mode, so the
# inputs (moved to `device` in greet) and the weights live on the same device.
path_inapp = './inapp_model'
inapp_model = torch.load(path_inapp, map_location=torch.device('cpu'))
inapp_model.to(device)
inapp_model.eval()

path_conf = './conf_model'
conf_model = torch.load(path_conf, map_location=torch.device('cpu'))
conf_model.to(device)
conf_model.eval()
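
# NOTE: if the checkpoint files were instead saved as state_dicts, a sketch of
# the loading code (assuming a 2-label sequence classification head on the same
# base model) would look like:
#
#   from transformers import AutoModelForSequenceClassification
#   model = AutoModelForSequenceClassification.from_pretrained(
#       'KB/bert-base-swedish-cased', num_labels=2)
#   model.load_state_dict(torch.load('./inapp_model', map_location='cpu'))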

def greet(txt):
    """Classify `txt` with both models and return a JSON-serialisable dict."""
    print('model is predicting -----------------------------------------')

    # Tokenize to fixed-length tensors and move them to the models' device.
    btc = tokenizer(txt, padding='max_length', truncation=True, return_tensors='pt')
    btc = btc.to(device)

    # Inappropriate-content classifier: softmax over the two logits gives
    # class probabilities; argmax gives the predicted class index.
    with torch.no_grad():
        outputs_inapp = inapp_model(**btc)
    predictions = F.softmax(outputs_inapp.logits, dim=1)
    inapp_score = [predictions[0][0].item(), predictions[0][1].item()]
    inapp_labels = torch.argmax(predictions, dim=1)

    # Confidential-content classifier, same procedure.
    with torch.no_grad():
        outputs_conf = conf_model(**btc)
    predictions = F.softmax(outputs_conf.logits, dim=1)
    conf_score = [predictions[0][0].item(), predictions[0][1].item()]
    conf_labels = torch.argmax(predictions, dim=1)

    # Predicted class indices as strings ("0" or "1").
    flag_inapp = str(inapp_labels[0].item())
    flag_conf = str(conf_labels[0].item())

    post_body = {
        "text": txt,
        "flag_inappropriate": {"is_inappropriate": flag_inapp, "score": inapp_score},
        "flag_confidential": {"is_private": flag_conf, "score": conf_score},
    }
    return post_body
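
# Illustrative shape of the returned payload (the scores here are made up):
# {
#   "text": "...",
#   "flag_inappropriate": {"is_inappropriate": "0", "score": [0.98, 0.02]},
#   "flag_confidential": {"is_private": "0", "score": [0.97, 0.03]}
# }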

# Gradio UI: free-text input, JSON output. gr.outputs.JSON is deprecated in
# newer Gradio releases; gr.JSON is the current component.
iface = gr.Interface(fn=greet, inputs="text", outputs=gr.JSON(label="Output"))
iface.launch()
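
# To reach the app from outside localhost (e.g. when running in a container),
# one option is iface.launch(server_name="0.0.0.0", server_port=7860); both
# are standard gradio launch() parameters.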