Gaetano Parente committed on
Commit
82f7563
·
1 Parent(s): 5d81264
modules/__pycache__/binary_classification.cpython-311.pyc CHANGED
Binary files a/modules/__pycache__/binary_classification.cpython-311.pyc and b/modules/__pycache__/binary_classification.cpython-311.pyc differ
 
modules/__pycache__/image_classification.cpython-311.pyc CHANGED
Binary files a/modules/__pycache__/image_classification.cpython-311.pyc and b/modules/__pycache__/image_classification.cpython-311.pyc differ
 
modules/__pycache__/multilabel_classification.cpython-311.pyc CHANGED
Binary files a/modules/__pycache__/multilabel_classification.cpython-311.pyc and b/modules/__pycache__/multilabel_classification.cpython-311.pyc differ
 
modules/binary_classification.py CHANGED
@@ -4,8 +4,6 @@ import re
4
  import keras.models as models
5
  from nltk.corpus import stopwords
6
  import nltk
7
- import os
8
- from flask import jsonify
9
  import modules.utilities.utils as utils
10
 
11
  BASE_PATH = './data/'
 
4
  import keras.models as models
5
  from nltk.corpus import stopwords
6
  import nltk
 
 
7
  import modules.utilities.utils as utils
8
 
9
  BASE_PATH = './data/'
modules/image_classification.py CHANGED
@@ -1,9 +1,6 @@
1
  import cv2
2
  import numpy as np
3
  import keras.models as models
4
- from flask import jsonify
5
- import os
6
- from modules.utilities.utils import allowed_model
7
 
8
  IMAGE_SIZE = (224, 224)
9
  class_names = ['Tubercolosi', 'No_Tubercolosi', 'Pneumonia', 'No_Pneumonia']
 
1
  import cv2
2
  import numpy as np
3
  import keras.models as models
 
 
 
4
 
5
  IMAGE_SIZE = (224, 224)
6
  class_names = ['Tubercolosi', 'No_Tubercolosi', 'Pneumonia', 'No_Pneumonia']
modules/multilabel_classification.py CHANGED
@@ -1,5 +1,3 @@
1
- import os
2
- from flask import jsonify
3
  from keras_preprocessing.sequence import pad_sequences
4
  import modules.utilities.utils as utils
5
  import keras.models as models
@@ -21,10 +19,20 @@ def predict(model_path, tokenizer_path, sentence):
21
  tokenizer = utils.load_tokenizer(tokenizer_path)
22
  x_data = []
23
  x_data.append(sentence)
 
 
24
  x_tokenized = tokenizer.texts_to_sequences(x_data)
 
 
25
  x_pad = pad_sequences(x_tokenized, maxlen=200)
 
 
26
  x_t = x_pad[0]
 
 
27
  prediction = model.predict(np.array([x_t]))
 
 
28
  #predicted_label = class_names[np.argmax(prediction[0])]
29
  return prediction#, predicted_label
30
 
 
 
 
1
  from keras_preprocessing.sequence import pad_sequences
2
  import modules.utilities.utils as utils
3
  import keras.models as models
 
19
  tokenizer = utils.load_tokenizer(tokenizer_path)
20
  x_data = []
21
  x_data.append(sentence)
22
+ print('x_data')
23
+ print(x_data)
24
  x_tokenized = tokenizer.texts_to_sequences(x_data)
25
+ print('x_tokenized')
26
+ print(x_tokenized)
27
  x_pad = pad_sequences(x_tokenized, maxlen=200)
28
+ print('x_pad')
29
+ print(x_pad)
30
  x_t = x_pad[0]
31
+ print('x_t')
32
+ print(x_t)
33
  prediction = model.predict(np.array([x_t]))
34
+ print('prediction')
35
+ print(prediction)
36
  #predicted_label = class_names[np.argmax(prediction[0])]
37
  return prediction#, predicted_label
38