DevikaRenjith committed on
Commit
2b52c26
·
1 Parent(s): c482966

Upload 17 files

Browse files
BP_model.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:85c4ca1f3bff7067955a82ec449853bb472478d67912175cbf7b1e53314800fa
3
+ size 4316
BackPropagation/BackPropagation.py ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ from tqdm import tqdm
3
+
4
+
5
class BackPropogation:
    """Single-layer binary classifier trained with a delta-rule update.

    Each sample's weighted sum is squashed to a hard 0/1 label by the
    configured activation; weights and bias are nudged by
    ``learning_rate * (target - prediction)`` after every sample.

    Note: the (misspelled) class name is kept so existing imports and
    pickled models continue to work.
    """

    def __init__(self, learning_rate=0.01, epochs=100, activation_function='step'):
        """Store hyper-parameters; weights are created lazily in fit().

        Args:
            learning_rate: step size for the weight/bias updates.
            epochs: number of full passes over the training data.
            activation_function: 'step', 'sigmoid', or 'relu'.
        """
        self.bias = 0
        self.learning_rate = learning_rate
        self.max_epochs = epochs
        self.activation_function = activation_function

    def activate(self, x):
        """Map a weighted sum to a binary class label (0 or 1).

        Raises:
            ValueError: if the configured activation is unknown (the
                original silently returned None here, which corrupted
                downstream arithmetic).
        """
        if self.activation_function == 'step':
            return 1 if x >= 0 else 0
        if self.activation_function == 'sigmoid':
            # Threshold the logistic output at 0.5.
            return 1 if (1 / (1 + np.exp(-x))) >= 0.5 else 0
        if self.activation_function == 'relu':
            return 1 if max(0, x) >= 0.5 else 0
        raise ValueError(f"Unknown activation function: {self.activation_function!r}")

    def fit(self, X, y):
        """Train on X (2-D array, samples x features) against labels y.

        Removed the original's unused ``error_sum`` local.
        """
        n_features = X.shape[1]
        self.weights = np.zeros(n_features)
        for epoch in tqdm(range(self.max_epochs)):
            for inputs, target in zip(X, y):
                weighted_sum = np.dot(inputs, self.weights) + self.bias
                prediction = self.activate(weighted_sum)

                # Delta rule: shift weights toward the target label.
                error = target - prediction
                self.weights += self.learning_rate * error * inputs
                self.bias += self.learning_rate * error

            print(f"Updated Weights after epoch {epoch} with {self.weights}")
        print("Training Completed")

    def predict(self, X):
        """Return a list of 0/1 predictions, one per row of X."""
        predictions = []
        for inputs in X:
            weighted_sum = np.dot(inputs, self.weights) + self.bias
            predictions.append(self.activate(weighted_sum))
        return predictions
48
+
49
+
50
+
51
+
52
+
53
+
BackPropagation/__init__.py ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ from tqdm import tqdm
3
+
4
+
5
class BackPropogation:
    """Binary classifier trained with a simple delta-rule weight update.

    The weighted sum of each sample is collapsed to a hard 0/1 decision
    by the configured activation; after every sample the weights and
    bias move by ``learning_rate * error``.
    """

    def __init__(self, learning_rate=0.01, epochs=100, activation_function='step'):
        # Bias starts at zero; the weight vector is built lazily in fit().
        self.bias = 0
        self.learning_rate = learning_rate
        self.max_epochs = epochs
        self.activation_function = activation_function

    def activate(self, x):
        # Reduce the chosen activation to a binary decision.
        name = self.activation_function
        if name == 'step':
            return 1 if x >= 0 else 0
        elif name == 'sigmoid':
            squashed = 1 / (1 + np.exp(-x))
            return 1 if squashed >= 0.5 else 0
        elif name == 'relu':
            return 1 if max(0, x) >= 0.5 else 0

    def fit(self, X, y):
        # Run max_epochs full passes over the training data.
        error_sum = 0
        n_features = X.shape[1]
        self.weights = np.zeros((n_features))
        for epoch in tqdm(range(self.max_epochs)):
            for row in range(len(X)):
                sample = X[row]
                label = y[row]
                score = np.dot(sample, self.weights) + self.bias
                output = self.activate(score)

                # Delta rule: move weights in proportion to the error.
                error = label - output
                self.weights += self.learning_rate * error * sample
                self.bias += self.learning_rate * error

            print(f"Updated Weights after epoch {epoch} with {self.weights}")
        print("Training Completed")

    def predict(self, X):
        # Score every row of X and threshold through the activation.
        return [
            self.activate(np.dot(X[row], self.weights) + self.bias)
            for row in range(len(X))
        ]
48
+
49
+
50
+
51
+
52
+
53
+
BackPropagation/__pycache__/BackPropagation.cpython-311.pyc ADDED
Binary file (3.13 kB). View file
 
BackPropagation/imdb_backpropagation.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from tensorflow.keras.datasets import imdb
from BackPropagation import BackPropogation
from tensorflow.keras.preprocessing.sequence import pad_sequences
from sklearn.metrics import accuracy_score
import pickle

# Keep only the most frequent words of the IMDB vocabulary.
top_words = 5000
(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=top_words)

# Pad/truncate every review so the input matrices are rectangular.
max_review_length = 500
X_train = pad_sequences(X_train, maxlen=max_review_length)
X_test = pad_sequences(X_test, maxlen=max_review_length)

# Train the custom back-propagation classifier and report test accuracy.
backprop = BackPropogation(epochs=100, learning_rate=0.01, activation_function='sigmoid')
backprop.fit(X_train, y_train)
pred = backprop.predict(X_test)
print(f"Accuracy : {accuracy_score(pred, y_test)}")

# Persist the trained model with pickle ...
with open('BP_model.pkl', 'wb') as model_file:
    pickle.dump(backprop, model_file)

# ... and read it back to confirm the artifact is loadable.
with open('BP_model.pkl', 'rb') as model_file:
    model = pickle.load(model_file)
DNN/dnn_main.py ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Embedding, Flatten
from tensorflow.keras.datasets import imdb
from tensorflow.keras.preprocessing.sequence import pad_sequences


# Vocabulary cap for the IMDB dataset.
top_words = 5000
(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=top_words)

# Pad/truncate every review to a fixed 500-token length.
max_review_length = 500
X_train = pad_sequences(X_train, maxlen=max_review_length)
X_test = pad_sequences(X_test, maxlen=max_review_length)

# Feed-forward classifier: embedding -> flatten -> shrinking dense stack.
model = Sequential()
model.add(Embedding(input_dim=top_words, output_dim=24, input_length=max_review_length))
model.add(Flatten())
for units in (64, 32, 16):
    model.add(Dense(units, activation='relu'))
model.add(Dense(1, activation='sigmoid'))

model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.summary()

print("Training Started.")
history = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=10, batch_size=20)

# Held-out evaluation on the test split.
loss, acc = model.evaluate(X_test, y_test)
print("Training Finished.")

print(f'Test Accuracy: {round(acc * 100)}')

# NOTE(review): hard-coded absolute Windows path — presumably consumed
# by a local Streamlit app; confirm before making it portable.
model.save(r'C:\Users\HP\Desktop\Devika_streamlit\DNN_model.h5')
36
+
37
+
38
+
39
+
DNN_model.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1dd59e62b447b930b18093e224929984a5cb258f34996c25ac39dd0a10cdeb1f
3
+ size 10735120
LSTM/simplelstm.py ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# LSTM for sequence classification in the IMDB dataset
import tensorflow as tf
from tensorflow.keras.datasets import imdb
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import LSTM
from tensorflow.keras.layers import Embedding
from tensorflow.keras.preprocessing import sequence


# Seed TensorFlow so runs are repeatable.
tf.random.set_seed(7)

# Restrict the vocabulary to the most frequent words; the rest are zeroed.
top_words = 5000
(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=top_words)

# Force every review to exactly 500 tokens (truncate or zero-pad).
max_review_length = 500
X_train = sequence.pad_sequences(X_train, maxlen=max_review_length)
X_test = sequence.pad_sequences(X_test, maxlen=max_review_length)

# Model: embedding -> single 200-unit LSTM -> sigmoid output.
embedding_dim = 32
model = Sequential()
model.add(Embedding(top_words, embedding_dim, input_length=max_review_length))
model.add(LSTM(200))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model.summary())

model.fit(X_train, y_train, epochs=1, batch_size=64)

# Final evaluation on the held-out test split.
scores = model.evaluate(X_test, y_test, verbose=0)
print("Accuracy: %.2f%%" % (scores[1]*100))

# NOTE(review): hard-coded absolute Windows path — verify downstream use.
model.save(r'C:\Users\HP\Desktop\Devika_streamlit\LSTM_model.h5')
34
+
LSTM_model.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:177b9e18bbaf02ab907356929bab3a307040fe041955b50c04870d62a9863e2d
3
+ size 4194296
PP_model.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:84bf4ef3840f8dbc7f2d8d9a7efa66fdbdda5a777c3a5d5f0d931e98e76e6e98
3
+ size 2264
Perceptron/Perceptron.py ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ from tqdm import tqdm
3
+
4
+
5
class Perceptron:
    """Classic single-layer perceptron for binary classification.

    Fix over the original: fit() now actually applies the perceptron
    learning rule. The original computed a prediction for every sample
    but never updated the weights or bias, so ``learning_rate`` was
    unused and the model kept its random initial weights.
    """

    def __init__(self, learning_rate=0.01, epochs=100, activation_function='step'):
        """Store hyper-parameters; weights are initialised in fit().

        Args:
            learning_rate: step size for the weight/bias updates.
            epochs: number of full passes over the training data.
            activation_function: 'step', 'sigmoid', or 'relu'.
        """
        self.bias = 0
        self.learning_rate = learning_rate
        self.max_epochs = epochs
        self.activation_function = activation_function

    def activate(self, x):
        """Collapse a weighted sum to a hard 0/1 label."""
        if self.activation_function == 'step':
            return 1 if x >= 0 else 0
        elif self.activation_function == 'sigmoid':
            # Threshold the logistic output at 0.5.
            return 1 if (1 / (1 + np.exp(-x))) >= 0.5 else 0
        elif self.activation_function == 'relu':
            return 1 if max(0, x) >= 0.5 else 0

    def fit(self, X, y):
        """Train with the perceptron rule: w += lr * (target - pred) * x."""
        n_features = X.shape[1]
        # Cast to float: randint yields an integer array, and an in-place
        # float '+=' on it would raise a numpy casting error once the
        # learning-rule updates (added below) are applied.
        self.weights = np.random.randint(n_features, size=(n_features)).astype(float)
        for epoch in tqdm(range(self.max_epochs)):
            for i in range(len(X)):
                inputs = X[i]
                target = y[i]
                weighted_sum = np.dot(inputs, self.weights) + self.bias
                prediction = self.activate(weighted_sum)
                # Perceptron update rule (missing in the original).
                error = target - prediction
                self.weights += self.learning_rate * error * inputs
                self.bias += self.learning_rate * error
        print("Training Completed")

    def predict(self, X):
        """Return a list of 0/1 predictions, one per row of X."""
        predictions = []
        for i in range(len(X)):
            inputs = X[i]
            weighted_sum = np.dot(inputs, self.weights) + self.bias
            prediction = self.activate(weighted_sum)
            predictions.append(prediction)
        return predictions
41
+
42
+
43
+
44
+
45
+
46
+
Perceptron/__init__.py ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ from tqdm import tqdm
3
+
4
+
5
class Perceptron:
    """Thresholded linear scorer over the input features.

    NOTE(review): fit() runs the epoch/sample loops and computes a
    prediction for every sample, but never updates weights or bias, so
    the model keeps its random initial weights and ``learning_rate`` is
    unused. That behavior is preserved here as-is.
    """

    def __init__(self, learning_rate=0.01, epochs=100, activation_function='step'):
        # Bias starts at zero; weights are created in fit().
        self.bias = 0
        self.learning_rate = learning_rate
        self.max_epochs = epochs
        self.activation_function = activation_function

    def activate(self, x):
        # Reduce the configured activation to a binary decision.
        kind = self.activation_function
        if kind == 'step':
            return 1 if x >= 0 else 0
        elif kind == 'sigmoid':
            squashed = 1 / (1 + np.exp(-x))
            return 1 if squashed >= 0.5 else 0
        elif kind == 'relu':
            return 1 if max(0, x) >= 0.5 else 0

    def fit(self, X, y):
        # Random integer weight vector, one entry per feature.
        feature_count = X.shape[1]
        self.weights = np.random.randint(feature_count, size=(feature_count))
        for _epoch in tqdm(range(self.max_epochs)):
            for row in range(len(X)):
                sample = X[row]
                label = y[row]
                score = np.dot(sample, self.weights) + self.bias
                decision = self.activate(score)
        print("Training Completed")

    def predict(self, X):
        # Score each row of X and threshold through the activation.
        return [
            self.activate(np.dot(X[row], self.weights) + self.bias)
            for row in range(len(X))
        ]
41
+
42
+
43
+
44
+
45
+
46
+
Perceptron/__pycache__/Perceptron.cpython-311.pyc ADDED
Binary file (2.82 kB). View file
 
Perceptron/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (2.82 kB). View file
 
Perceptron/imdb_perceptron.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from tensorflow.keras.datasets import imdb
from Perceptron import Perceptron
from tensorflow.keras.preprocessing.sequence import pad_sequences
from sklearn.metrics import accuracy_score
import pickle

# Vocabulary cap for the IMDB dataset.
top_words = 5000
(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=top_words)

# Pad/truncate reviews so the input matrices are rectangular.
max_review_length = 500
X_train = pad_sequences(X_train, maxlen=max_review_length)
X_test = pad_sequences(X_test, maxlen=max_review_length)

# Train the custom perceptron and measure test accuracy.
percep = Perceptron(epochs=100)

percep.fit(X_train, y_train)
pred = percep.predict(X_test)

print(f"Accuracy : {accuracy_score(pred, y_test)}")

# Persist the trained model with pickle ...
with open('PP_model.pkl', 'wb') as model_file:
    pickle.dump(percep, model_file)

# ... then reload it to confirm the artifact is readable.
with open('PP_model.pkl', 'rb') as model_file:
    model = pickle.load(model_file)
RNN/imdb_rnn.py ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
import tensorflow as tf
from numpy import argmax
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import RMSprop, Adam
from tensorflow.keras.datasets import imdb
from tensorflow.keras.preprocessing.sequence import pad_sequences
from sklearn.metrics import accuracy_score


# Cap the vocabulary at the 5000 most frequent words.
top_words = 5000
(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=top_words)

# Pad/truncate every review to 500 tokens.
max_review_length = 500
X_train = pad_sequences(X_train, maxlen=max_review_length)
X_test = pad_sequences(X_test, maxlen=max_review_length)

# Simple RNN classifier: embedding -> SimpleRNN -> dense stack -> sigmoid.
model = tf.keras.models.Sequential([
    tf.keras.layers.Embedding(input_dim=top_words, output_dim=24, input_length=max_review_length),
    tf.keras.layers.SimpleRNN(24, return_sequences=False),
    tf.keras.layers.Dense(64, activation='relu'),
    tf.keras.layers.Dense(32, activation='relu'),
    tf.keras.layers.Dense(1, activation='sigmoid')
])

# compile the model
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

print("---------------------- -------------------------\n")

# summarize the model
print(model.summary())

print("---------------------- -------------------------\n")

# FIX: accuracy should be maximised. The original used mode='min', which
# tells EarlyStopping that *lower* accuracy is better.
early_stop = tf.keras.callbacks.EarlyStopping(monitor='accuracy', mode='max', patience=10)

print("---------------------- Training -------------------------\n")

# fit the model
model.fit(x=X_train,
          y=y_train,
          epochs=10,
          validation_data=(X_test, y_test),
          callbacks=[early_stop]
          )
print("---------------------- -------------------------\n")


def acc_report(y_true, y_pred):
    """Print the accuracy of y_pred against y_true and return it."""
    acc_sc = accuracy_score(y_true, y_pred)
    # FIX: scale before rounding so the printed percentage carries no
    # float artifacts (round(x, 2) * 100 could print e.g. 87.00000000000001).
    print(f"Accuracy : {round(acc_sc * 100, 2)}")
    return acc_sc


# Threshold the sigmoid probabilities at 0.5 to get hard 0/1 labels.
preds = (model.predict(X_test) > 0.5).astype("int32")
print(acc_report(y_test, preds))

# NOTE(review): hard-coded absolute Windows path — presumably consumed
# by a local Streamlit app; confirm before making it portable.
model.save(r'C:\Users\HP\Desktop\Devika_streamlit\RNN_model.h5')
RNN_model.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4da4a9cf95636729817b5c93922bab236954038ac366f5260c02b01a8364ef4c
3
+ size 1548440