File size: 1,310 Bytes
2ff9250
 
 
 
 
 
 
 
 
b1db827
2ff9250
bc6ef8a
2ff9250
 
bc6ef8a
2ff9250
b1db827
 
 
 
 
 
2ff9250
 
ce34350
2ff9250
bc6ef8a
 
2ff9250
c469f55
87fbdff
db9c437
bc6ef8a
c3cd511
 
 
4d60837
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
from keras_preprocessing.sequence import pad_sequences
import modules.utilities.utils as utils
import keras.models as models
import numpy as np

# Root directory for all persisted artefacts (models and tokenizers).
BASE_PATH = './data/'
MODEL = BASE_PATH + 'model/'       # directory holding saved Keras .h5 models
TOKEN = BASE_PATH + 'tokenizer/'   # directory holding tokenizer JSON files

# Topic labels; index order must match the model's output neurons.
class_names = np.array(['Economia', 'Politica', 'Scienza_e_tecnica', 'Sport', 'Storia'])

def predict(model_path, tokenizer_path, sentence):
    """Tokenize *sentence*, pad it to length 200 and run it through the model.

    Parameters
    ----------
    model_path : str
        Path to a saved Keras ``.h5`` model file.
    tokenizer_path : str
        Path to a tokenizer JSON file understood by ``utils.load_tokenizer``.
    sentence : str
        Raw input text to classify.

    Returns
    -------
    numpy.ndarray
        Raw model output, shape ``(1, n_classes)``.
    """
    tokenizer = utils.load_tokenizer(tokenizer_path)
    # texts_to_sequences takes a list of texts; we classify a single sentence.
    tokens = tokenizer.texts_to_sequences([sentence])[0]
    # Replace missing token ids with 1 (out-of-vocabulary index).
    # NOTE(review): texts_to_sequences normally *drops* unknown words rather
    # than emitting None, so this guard is likely dead code — kept for safety.
    tokens = [1 if token is None else token for token in tokens]
    # Pad/truncate to the fixed input length the model was trained with.
    x_pad = pad_sequences([tokens], maxlen=200)
    # compile=False: inference only, no need to restore the optimizer state.
    model = models.load_model(model_path, compile=False)
    return model.predict(np.array([x_pad[0]]))

def multi_classification(text):
    """Classify *text* against the five topic categories.

    Returns a dict mapping each class name to its prediction score,
    formatted as a string with four decimal places.
    """
    model_file = MODEL + 'multi-classification.h5'
    tokenizer_file = TOKEN + 'multi-classification-tokenizer.json'
    scores = predict(model_file, tokenizer_file, text)
    # scores[0] is the per-class output vector; pair each score with its label.
    return {
        class_names[idx]: "%.4f" % float(score)
        for idx, score in enumerate(scores[0])
    }