anamikau committed on
Commit
3a12dd0
·
1 Parent(s): fa3716a

Upload 21 files

Browse files
BackPropogation.py ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ from tqdm import tqdm
3
+
4
+
5
class BackPropogation:
    """Single-layer binary classifier trained with the error-driven delta rule.

    The class name keeps the original spelling ("Propogation") for backward
    compatibility with existing imports and pickled models.

    Parameters
    ----------
    learning_rate : float
        Step size for each weight/bias update.
    epochs : int
        Number of full passes over the training data.
    activation_function : str
        One of 'step', 'sigmoid' or 'relu'; each is thresholded to a hard
        0/1 label.
    """

    def __init__(self, learning_rate=0.01, epochs=100, activation_function='step'):
        self.bias = 0
        self.learning_rate = learning_rate
        self.max_epochs = epochs
        self.activation_function = activation_function

    def activate(self, x):
        """Map a weighted sum to a hard 0/1 label using the configured activation.

        Raises
        ------
        ValueError
            If the configured activation function is not recognised.
            (The original silently returned None, which later surfaced as an
            opaque TypeError during training.)
        """
        if self.activation_function == 'step':
            return 1 if x >= 0 else 0
        elif self.activation_function == 'sigmoid':
            return 1 if (1 / (1 + np.exp(-x))) >= 0.5 else 0
        elif self.activation_function == 'relu':
            return 1 if max(0, x) >= 0.5 else 0
        raise ValueError(f"Unknown activation function: {self.activation_function!r}")

    def fit(self, X, y):
        """Train on samples X (2-D array-like) against 0/1 targets y.

        Weights start at zero and are adjusted after every sample by the
        delta rule: w += lr * (target - prediction) * inputs.
        """
        n_features = X.shape[1]
        self.weights = np.zeros(n_features)
        for epoch in tqdm(range(self.max_epochs)):
            for i in range(len(X)):
                inputs = X[i]
                target = y[i]
                weighted_sum = np.dot(inputs, self.weights) + self.bias
                prediction = self.activate(weighted_sum)

                # Error-driven update of weights and bias.
                error = target - prediction
                self.weights += self.learning_rate * error * inputs
                self.bias += self.learning_rate * error

            print(f"Updated Weights after epoch {epoch} with {self.weights}")
        print("Training Completed")

    def predict(self, X):
        """Return a list of 0/1 predictions, one per row of X."""
        predictions = []
        for i in range(len(X)):
            inputs = X[i]
            weighted_sum = np.dot(inputs, self.weights) + self.bias
            prediction = self.activate(weighted_sum)
            predictions.append(prediction)
        return predictions
48
+
49
+
50
+
51
+
52
+
53
+
Perceptron.py ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ from tqdm import tqdm
3
+
4
+
5
class Perceptron:
    """Classic single-layer perceptron for binary (0/1) classification.

    Parameters
    ----------
    learning_rate : float
        Step size for each weight/bias update.
    epochs : int
        Number of full passes over the training data.
    activation_function : str
        One of 'step', 'sigmoid' or 'relu'; each is thresholded to a hard
        0/1 label.
    """

    def __init__(self, learning_rate=0.01, epochs=100, activation_function='step'):
        self.bias = 0
        self.learning_rate = learning_rate
        self.max_epochs = epochs
        self.activation_function = activation_function

    def activate(self, x):
        """Map a weighted sum to a hard 0/1 label using the configured activation.

        Raises
        ------
        ValueError
            If the configured activation function is not recognised
            (the original silently returned None here).
        """
        if self.activation_function == 'step':
            return 1 if x >= 0 else 0
        elif self.activation_function == 'sigmoid':
            return 1 if (1 / (1 + np.exp(-x))) >= 0.5 else 0
        elif self.activation_function == 'relu':
            return 1 if max(0, x) >= 0.5 else 0
        raise ValueError(f"Unknown activation function: {self.activation_function!r}")

    def fit(self, X, y):
        """Train with the perceptron learning rule.

        Bug fix: the original computed predictions but never updated the
        weights or bias, so the model never learned anything. It also
        initialised the weights with random integers (np.random.randint),
        which would make the float updates fail and the results
        non-deterministic. Weights now start at zero (consistent with the
        sibling BackPropogation class) and are adjusted after every sample.
        """
        n_features = X.shape[1]
        self.weights = np.zeros(n_features)
        for epoch in tqdm(range(self.max_epochs)):
            for i in range(len(X)):
                inputs = X[i]
                target = y[i]
                weighted_sum = np.dot(inputs, self.weights) + self.bias
                prediction = self.activate(weighted_sum)

                # Perceptron update: move the boundary only when wrong.
                error = target - prediction
                self.weights += self.learning_rate * error * inputs
                self.bias += self.learning_rate * error
        # Print once after training, not once per sample.
        print("Training Completed")

    def predict(self, X):
        """Return a list of 0/1 predictions, one per row of X."""
        predictions = []
        for i in range(len(X)):
            inputs = X[i]
            weighted_sum = np.dot(inputs, self.weights) + self.bias
            prediction = self.activate(weighted_sum)
            predictions.append(prediction)
        return predictions
41
+
42
+
43
+
44
+
45
+
46
+
backpropogation_.py ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Train the custom BackPropogation classifier on the IMDB review dataset.

Reads "IMDB Dataset.csv", tokenises and pads the review text, fits the
model, and pickles both the trained model and the tokeniser for reuse.
"""
import pickle

import pandas as pd
import tensorflow as tf
from sklearn.model_selection import train_test_split
from tensorflow.keras.preprocessing import sequence

from BackPropogation import BackPropogation

MAX_REVIEW_LENGTH = 500  # every review is padded/truncated to this length

# Load the labelled reviews; sentiment is encoded as 1=negative, 0=positive.
reviews = pd.read_csv(r"IMDB Dataset.csv")
reviews['sentiment'] = reviews['sentiment'].map({'negative': 1, 'positive': 0})

texts = reviews['review'].values
labels = reviews['sentiment'].values
X_train, X_test, y_train, y_test = train_test_split(
    texts, labels, test_size=0.15, random_state=42)

# Fit the tokeniser on the training split only, then encode both splits.
tokeniser = tf.keras.preprocessing.text.Tokenizer()
tokeniser.fit_on_texts(X_train)
train_seqs = tokeniser.texts_to_sequences(X_train)
test_seqs = tokeniser.texts_to_sequences(X_test)

train_padded = sequence.pad_sequences(train_seqs, maxlen=MAX_REVIEW_LENGTH)
test_padded = sequence.pad_sequences(test_seqs, maxlen=MAX_REVIEW_LENGTH)

# Train and run a quick prediction pass over the held-out split.
backprop = BackPropogation(learning_rate=0.01, epochs=5, activation_function='sigmoid')
backprop.fit(train_padded, y_train)
pred = backprop.predict(test_padded)

# Persist the trained model and its tokeniser for the Streamlit app.
with open("bp_model.pkl", 'wb') as file:
    pickle.dump(backprop, file)
with open("bp_tokeniser.pkl", 'wb') as file:
    pickle.dump(tokeniser, file)
bp_model.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2898ac4c9ef15f477f4bd8ac49b1ae1357b92e6d8867b14c0b05ec7a4ea45149
3
+ size 4300
bp_tokeniser.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:25c055c7c91a7c5d0afa96ed3f102e256329c9b6dca921ec2021a7c19c9993bf
3
+ size 4992453
cnn_tumor.py ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Train a small CNN to detect brain tumors from 128x128 RGB images.

Loads jpg images from tumordataset\tumordata/{no,yes}, trains a binary
classifier, and saves the model as .h5 (and, for compatibility, as a pickle).
"""
import numpy as np
import matplotlib.pyplot as plt
import cv2
import os
import tensorflow as tf
from PIL import Image
from sklearn.model_selection import train_test_split
from tqdm import tqdm
import pickle

image_dir = r"tumordataset\tumordata"
no_tumor_images = os.listdir(image_dir + '/no')
yes_tumor_images = os.listdir(image_dir + '/yes')

dataset = []
label = []
img_siz = (128, 128)


def _load_images(image_names, subdir, class_label, desc):
    """Read, resize and collect the named jpg images, tagging each with class_label."""
    for image_name in tqdm(image_names, desc=desc):
        # Robust extension check: the original `split('.')[1] == 'jpg'` raised
        # IndexError on names without a dot and skipped names with several dots.
        if not image_name.endswith('.jpg'):
            continue
        image = cv2.imread(image_dir + subdir + image_name)
        image = Image.fromarray(image, 'RGB')
        image = image.resize(img_siz)
        dataset.append(np.array(image))
        label.append(class_label)


_load_images(no_tumor_images, '/no/', 0, "No Tumor")   # class 0: healthy
_load_images(yes_tumor_images, '/yes/', 1, "Tumor")    # class 1: tumor

dataset = np.array(dataset)
label = np.array(label)

x_train, x_test, y_train, y_test = train_test_split(dataset, label, test_size=0.2, random_state=42)

x_train = tf.keras.utils.normalize(x_train, axis=1)
x_test = tf.keras.utils.normalize(x_test, axis=1)

# Small conv + dense stack; final sigmoid gives P(tumor).
model = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(128, 128, 3)),
    tf.keras.layers.MaxPooling2D((2, 2)),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(256, activation='relu'),
    tf.keras.layers.Dropout(.5),
    tf.keras.layers.Dense(512, activation='relu'),
    tf.keras.layers.Dense(1, activation='sigmoid')
])
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
history = model.fit(x_train, y_train, epochs=5, batch_size=128, validation_split=0.1)

model.save("tumor_detection_model.h5")

# NOTE(review): pickling a compiled Keras model is fragile across TF versions;
# the .h5 file above is the reliable artefact. The pickle is kept only for
# compatibility with existing loaders.
with open("tumor_detection_model.pkl", "wb") as model_file:
    pickle.dump(model, model_file)
dnn_main.py ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Train a dense feed-forward network (DNN) on the IMDB review dataset.

Tokenises and post-pads the reviews to 500 tokens, fits a 4-layer MLP,
evaluates on the held-out split, and saves the model plus tokeniser.
"""
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from numpy import argmax
import tensorflow as tf
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.preprocessing import sequence
from tensorflow.keras.models import save_model
from tensorflow.keras.preprocessing.text import Tokenizer
import pickle

# Load the reviews; sentiment is encoded as 1=negative, 0=positive.
reviews = pd.read_csv(r"IMDB Dataset.csv")
reviews['sentiment'] = reviews['sentiment'].map({'negative': 1, 'positive': 0})
texts = reviews['review'].values
targets = reviews['sentiment'].values

X_train, X_test, y_train, y_test = train_test_split(
    texts, targets, test_size=0.3, random_state=42)

# Tokenise on the training split, then encode both splits.
tokeniser = tf.keras.preprocessing.text.Tokenizer()
tokeniser.fit_on_texts(X_train)
X_train = tokeniser.texts_to_sequences(X_train)
X_test = tokeniser.texts_to_sequences(X_test)

vocab_size = len(tokeniser.word_index) + 1  # +1 for the reserved 0 index

max_review_length = 500
X_train = sequence.pad_sequences(X_train, maxlen=max_review_length, padding='post')
X_test = sequence.pad_sequences(X_test, maxlen=max_review_length, padding='post')

n_features = X_train.shape[1]

# Simple MLP over the padded token-id vectors; sigmoid head for binary output.
model = Sequential()
model.add(Dense(64, activation='relu', input_shape=(500,)))
model.add(Dense(32, activation='relu'))
model.add(Dense(16, activation='relu'))
model.add(Dense(1, activation='sigmoid'))

opt = Adam(learning_rate=0.01)
model.compile(optimizer=opt, loss='binary_crossentropy', metrics=['accuracy'])

history = model.fit(X_train, y_train, epochs=50, batch_size=16)
loss, acc = model.evaluate(X_test, y_test)

# Persist the model and its tokeniser for the Streamlit app.
model.save("dnn_model.h5")

with open("dnn_tokeniser.pkl", 'wb') as file:
    pickle.dump(tokeniser, file)
dnn_model.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a3c36881e26aefd7e17dff29023d03335beb6eb45243e4490f445244f8b2f648
3
+ size 457224
dnn_tokeniser.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ebf774f92712879256b5165914d4d1d0bf0f85c3e67d9c3c522bfc979f0886c6
3
+ size 4534143
lstm_model.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:de29fa795ade36693d1309892b37d6b539402b20af46955ae097db8b0499a995
3
+ size 41224696
lstm_tokeniser.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e8e9e9be913c579156de078e58d31608f31072b6dec4546314db2c78670d20df
3
+ size 4534143
perceptron _.py ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Train the custom Perceptron on the IMDB review dataset.

Tokenises and pads the review text, fits the model, reports accuracy and a
classification report, then pickles the model and the tokeniser.
"""
import pickle

import pandas as pd
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from tensorflow.keras.preprocessing import sequence

from Perceptron import Perceptron

MAX_REVIEW_LENGTH = 500  # every review is padded/truncated to this length

# Load the labelled reviews; sentiment is encoded as 1=negative, 0=positive.
reviews = pd.read_csv(r"IMDB Dataset.csv")
reviews['sentiment'] = reviews['sentiment'].map({'negative': 1, 'positive': 0})
texts = reviews['review'].values
labels = reviews['sentiment'].values

X_train, X_test, y_train, y_test = train_test_split(
    texts, labels, test_size=0.2, random_state=42)

# Fit the tokeniser on the training split only, then encode both splits.
tokeniser = tf.keras.preprocessing.text.Tokenizer()
tokeniser.fit_on_texts(X_train)
train_seqs = tokeniser.texts_to_sequences(X_train)
test_seqs = tokeniser.texts_to_sequences(X_test)

train_padded = sequence.pad_sequences(train_seqs, maxlen=MAX_REVIEW_LENGTH)
test_padded = sequence.pad_sequences(test_seqs, maxlen=MAX_REVIEW_LENGTH)

# Train and evaluate on the held-out split.
perceptron = Perceptron(epochs=10, activation_function='sigmoid')
perceptron.fit(train_padded, y_train)
pred = perceptron.predict(test_padded)

accuracy = accuracy_score(pred, y_test)
print(f"Accuracy : {accuracy}")
report = classification_report(pred, y_test, digits=2)
print(report)

# Persist the trained model and its tokeniser for the Streamlit app.
with open("ppn_model.pkl", 'wb') as file:
    pickle.dump(perceptron, file)
with open("ppn_tokeniser.pkl", 'wb') as file:
    pickle.dump(tokeniser, file)
ppn_model.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:72bb3d5d38fb9f98070b4f0650babc7a29ddfc80d75b2d8ef7380667e0140878
3
+ size 2267
ppn_tokeniser.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5d7506a525eaab4aefbb35d6774694ff0479b70f8aa33d4e29d41fc5b3d9adb0
3
+ size 4848716
simplelstm.py ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Train an Embedding + LSTM sentiment classifier on the IMDB dataset.

Tokenises and pads the reviews to 500 tokens, fits the network for three
epochs, prints test accuracy, and saves the model plus tokeniser.
"""
import tensorflow as tf
import pandas as pd
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import LSTM
from tensorflow.keras.layers import Embedding
from tensorflow.keras.preprocessing import sequence
from sklearn.model_selection import train_test_split
from tensorflow.keras.models import save_model
from tensorflow.keras.preprocessing.text import Tokenizer

import pickle

# Load the reviews; sentiment is encoded as 1=negative, 0=positive.
reviews = pd.read_csv(r"IMDB Dataset.csv")
reviews['sentiment'] = reviews['sentiment'].map({'negative': 1, 'positive': 0})
texts = reviews['review'].values
targets = reviews['sentiment'].values

X_train, X_test, y_train, y_test = train_test_split(
    texts, targets, test_size=0.3, random_state=42)

# Tokenise on the training split, then encode both splits.
tokeniser = tf.keras.preprocessing.text.Tokenizer()
tokeniser.fit_on_texts(X_train)
X_train = tokeniser.texts_to_sequences(X_train)
X_test = tokeniser.texts_to_sequences(X_test)
print(X_train[0:2])  # sanity-check the first two encoded reviews

vocab_size = len(tokeniser.word_index) + 1  # +1 for the reserved 0 index

max_review_length = 500
X_train = sequence.pad_sequences(X_train, maxlen=max_review_length)
X_test = sequence.pad_sequences(X_test, maxlen=max_review_length)

# Embedding -> LSTM -> sigmoid head for binary sentiment.
embedding_vector_length = 32
model = Sequential([
    Embedding(vocab_size, embedding_vector_length, input_length=max_review_length),
    LSTM(100),
    Dense(1, activation='sigmoid'),
])
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(X_train, y_train, epochs=3, batch_size=64)

scores = model.evaluate(X_test, y_test, verbose=0)
print("Accuracy: %.2f%%" % (scores[1]*100))

# Persist the model and its tokeniser for the Streamlit app.
model.save("lstm_model.h5")
with open("lstm_tokeniser.pkl", 'wb') as file:
    pickle.dump(tokeniser, file)
smsspam.py ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Train a SimpleRNN spam classifier on the SMSSpamCollection dataset.

Reads the tab-separated SMS corpus, tokenises and post-pads the messages to
10 tokens, trains with early stopping on training accuracy, and saves the
model (.h5) plus tokeniser (.pkl).
"""
import pandas as pd
from sklearn.model_selection import train_test_split
import tensorflow as tf
from keras.models import load_model
from tensorflow.keras.models import save_model
from tensorflow.keras.preprocessing.text import Tokenizer
import pickle

# Load the corpus; label is encoded as 1=spam, 0=ham.
dataset = pd.read_csv(r"SMSSpamCollection.txt", sep='\t', names=['label', 'message'])

dataset['label'] = dataset['label'].map({'spam': 1, 'ham': 0})
X = dataset['message'].values
y = dataset['label'].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
tokeniser = tf.keras.preprocessing.text.Tokenizer()
tokeniser.fit_on_texts(X_train)
encoded_train = tokeniser.texts_to_sequences(X_train)
encoded_test = tokeniser.texts_to_sequences(X_test)

max_length = 10
padded_train = tf.keras.preprocessing.sequence.pad_sequences(encoded_train, maxlen=max_length, padding='post')
padded_test = tf.keras.preprocessing.sequence.pad_sequences(encoded_test, maxlen=max_length, padding='post')

vocab_size = len(tokeniser.word_index) + 1  # +1 for the reserved 0 index

# Define the model: Embedding -> SimpleRNN -> two dense layers -> sigmoid head.
model = tf.keras.models.Sequential([
    tf.keras.layers.Embedding(input_dim=vocab_size, output_dim=24, input_length=max_length),
    tf.keras.layers.SimpleRNN(24, return_sequences=False),
    tf.keras.layers.Dense(64, activation='relu'),
    tf.keras.layers.Dense(32, activation='relu'),
    tf.keras.layers.Dense(1, activation='sigmoid')
])

# Compile the model.
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

# Bug fix: the original used mode='min', which tells EarlyStopping to treat
# *decreasing* accuracy as improvement — training would stop as soon as
# accuracy kept rising for `patience` epochs. Accuracy must be maximised.
early_stop = tf.keras.callbacks.EarlyStopping(monitor='accuracy', mode='max', patience=10)

# Fit the model.
model.fit(x=padded_train,
          y=y_train,
          epochs=50,
          validation_data=(padded_test, y_test),
          callbacks=[early_stop]
          )

# Hard 0/1 predictions on the held-out split.
preds = (model.predict(padded_test) > 0.5).astype("int32")

model_filename = "spam_model.h5"
model.save(model_filename)

# Save the tokenizer using pickle.
tokeniser_filename = "spam_tokeniser.pkl"
with open(tokeniser_filename, 'wb') as tokeniser_file:
    pickle.dump(tokeniser, tokeniser_file)
spam_model.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7b5ff8cf0fe77a9f85f89d3d035ceb3e619e1ab45590d7ae0392f2900f929250
3
+ size 2269016
spam_tokeniser.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1824c47cdb0914f8dde5874e1ec6277a1129773154fde9db60887898327a8e6a
3
+ size 290462
tasks.py ADDED
@@ -0,0 +1,153 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Streamlit demo app: serves the trained tumor-detection CNN and the five
sentiment classifiers (Perceptron, BackPropagation, RNN, DNN, LSTM) built by
the sibling training scripts. Expects the model/tokeniser artefacts to exist
in the working directory."""
import pandas as pd
import streamlit as st
import numpy as np
import tensorflow as tf
from PIL import Image
import pickle



st.header('Neural Networks Demo')
task = st.selectbox('Select Task', ["Select One",'Sentiment Classification', 'Tumor Detection'])


if task == "Tumor Detection":
    # CNN branch: classify an uploaded image as tumor / no tumor.
    def cnn(img, model):
        """Resize the upload to the 128x128 RGB input the CNN expects and
        return a human-readable verdict string."""
        img = Image.open(img)
        img = img.resize((128, 128))
        img = np.array(img)
        input_img = np.expand_dims(img, axis=0)  # add the batch dimension
        res = model.predict(input_img)
        # res is the raw model output; truthiness of a single-element
        # prediction decides the label.
        if res:
            return "Tumor Detected"
        else:
            return "No Tumor"

    cnn_model = tf.keras.models.load_model("tumor_detection_model.h5")
    uploaded_file = st.file_uploader("Choose a file", type=["jpg", "jpeg", "png"])
    if uploaded_file is not None:
        st.image(uploaded_file, caption="Uploaded Image", use_column_width=True)
        if st.button("Submit"):
            result=cnn(uploaded_file, cnn_model)
            st.write(result)


elif task == "Sentiment Classification":
    # NOTE: the UI label "BackPropagation" is spelled correctly here even
    # though the pickled class/module is "BackPropogation" (sic).
    types = ["Perceptron","BackPropagation", "RNN","DNN", "LSTM"]
    input_text2 = st.radio("Select", types, horizontal=True)

    if input_text2 == "Perceptron":
        # Load the pickled custom Perceptron and its tokeniser.
        with open("ppn_model.pkl",'rb') as file:
            perceptron = pickle.load(file)
        with open("ppn_tokeniser.pkl",'rb') as file:
            ppn_tokeniser = pickle.load(file)

        def ppn_make_predictions(inp, model):
            """Tokenise, pad to 500 and classify; 1 maps to Negative
            (matching the training-time label encoding)."""
            encoded_inp = ppn_tokeniser.texts_to_sequences([inp])
            padded_inp = tf.keras.preprocessing.sequence.pad_sequences(encoded_inp, maxlen=500)
            res = model.predict(padded_inp)
            if res:
                return "Negative"
            else:
                return "Positive"

        st.subheader('Movie Review Classification using Perceptron')
        inp = st.text_area('Enter message')
        if st.button('Check'):
            pred = ppn_make_predictions([inp], perceptron)
            st.write(pred)

    if input_text2 == "BackPropagation":
        # Load the pickled custom BackPropogation model and its tokeniser.
        with open("bp_model.pkl",'rb') as file:
            backprop = pickle.load(file)
        with open("bp_tokeniser.pkl",'rb') as file:
            bp_tokeniser = pickle.load(file)

        def bp_make_predictions(inp, model):
            """Tokenise, pad to 500 and classify; 1 maps to Negative."""
            encoded_inp = bp_tokeniser.texts_to_sequences([inp])
            padded_inp = tf.keras.preprocessing.sequence.pad_sequences(encoded_inp, maxlen=500)
            res = model.predict(padded_inp)
            if res:
                return "Negative"
            else:
                return "Positive"

        st.subheader('Movie Review Classification using BackPropagation')
        inp = st.text_area('Enter message')
        if st.button('Check'):
            pred = bp_make_predictions([inp], backprop)
            st.write(pred)


    elif input_text2 == "RNN":
        # Keras SimpleRNN spam classifier (trained by smsspam.py).
        rnn_model=tf.keras.models.load_model("spam_model.h5")
        with open("spam_tokeniser.pkl", 'rb') as model_file:
            rnn_tokeniser=pickle.load(model_file)

        def rnn_make_predictions(inp, model):
            """Tokenise, post-pad to 10 (training length) and classify;
            1 maps to Spam."""
            encoded_inp = rnn_tokeniser.texts_to_sequences(inp)
            padded_inp = tf.keras.preprocessing.sequence.pad_sequences(encoded_inp, maxlen=10, padding='post')
            res = (model.predict(padded_inp) > 0.5).astype("int32")
            if res:
                return "Spam"
            else:
                return "Ham"

        st.subheader('Spam message Classification using RNN')
        # NOTE(review): `input` shadows the builtin of the same name.
        input = st.text_area("Give message")
        if st.button('Check'):
            pred = rnn_make_predictions([input], rnn_model)
            st.write(pred)



    elif input_text2 == "DNN":
        # Dense feed-forward sentiment model (trained by dnn_main.py).
        dnn_model=tf.keras.models.load_model("dnn_model.h5")
        with open("dnn_tokeniser.pkl",'rb') as file:
            dnn_tokeniser = pickle.load(file)

        def dnn_make_predictions(inp, model):
            """Tokenise, pad to 500 and classify; 1 maps to Negative."""
            inp = dnn_tokeniser.texts_to_sequences(inp)
            inp = tf.keras.preprocessing.sequence.pad_sequences(inp, maxlen=500)
            res = (model.predict(inp) > 0.5).astype("int32")
            if res:
                return "Negative"
            else:
                return "Positive"

        st.subheader('Movie Review Classification using DNN')
        inp = st.text_area('Enter message')
        if st.button('Check'):
            pred = dnn_make_predictions([inp], dnn_model)
            st.write(pred)



    elif input_text2 == "LSTM":
        # Embedding + LSTM sentiment model (trained by simplelstm.py).
        lstm_model=tf.keras.models.load_model("lstm_model.h5")

        with open("lstm_tokeniser.pkl",'rb') as file:
            lstm_tokeniser = pickle.load(file)

        def lstm_make_predictions(inp, model):
            """Tokenise, pad to 500 and classify; 1 maps to Negative."""
            inp = lstm_tokeniser.texts_to_sequences(inp)
            inp = tf.keras.preprocessing.sequence.pad_sequences(inp, maxlen=500)
            res = (model.predict(inp) > 0.5).astype("int32")
            if res:
                return "Negative"
            else:
                return "Positive"
        st.subheader('Movie Review Classification using LSTM')
        inp = st.text_area('Enter message')
        if st.button('Check'):
            pred = lstm_make_predictions([inp], lstm_model)
            st.write(pred)
tumor_detection_model.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2557679ec6b175c2b8129f1a8752733876e3307f51027c7a2fe2e44c9fd5c891
3
+ size 391811360
tumor_detection_model.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:183abc8ff81fc6648ae103360f06acec2c1e1e845602dea48e5715ab148d865c
3
+ size 391803384