SandraPK committed on
Commit
457ad83
·
1 Parent(s): bf12787

Delete perceptron_code.py

Browse files
Files changed (1) hide show
  1. perceptron_code.py +0 -85
perceptron_code.py DELETED
@@ -1,85 +0,0 @@
1
- import numpy as np
2
- from sklearn.model_selection import train_test_split
3
- from sklearn.metrics import accuracy_score, classification_report
4
- import pickle
5
- from tensorflow.keras.datasets import imdb
6
- from tensorflow.keras.preprocessing import sequence
7
-
8
- import numpy as np
9
-
10
class Perceptron:
    """Single-layer perceptron with a configurable activation function.

    Weights are updated online with the classic delta rule:
    ``w += learning_rate * (target - prediction) * x``.
    """

    def __init__(self, input_size, epochs=100, learning_rate=0.01, activation_function='sigmoid'):
        """Initialize weights to zero.

        Args:
            input_size: Number of input features (length of the weight vector).
            epochs: Number of full passes over the training data.
            learning_rate: Step size for the weight/bias updates.
            activation_function: Either 'sigmoid' or 'step'.
        """
        # Bias is stored as a separate scalar, so `weights` holds exactly
        # one entry per input feature (no extra bias column needed).
        self.weights = np.zeros(input_size)
        self.bias = 0
        self.epochs = epochs
        self.learning_rate = learning_rate
        self.activation_function = activation_function

    def activate(self, x):
        """Apply the configured activation to ``x`` (scalar or ndarray).

        Raises:
            ValueError: If the configured activation function is unsupported.
        """
        if self.activation_function == 'sigmoid':
            return 1 / (1 + np.exp(-x))
        elif self.activation_function == 'step':
            return np.where(x >= 0, 1, 0)
        else:
            raise ValueError(f"Unsupported activation function: {self.activation_function}")

    def fit(self, X, y):
        """Train online on samples ``X`` (n_samples, input_size) and targets ``y``.

        Updates ``self.weights`` and ``self.bias`` in place, one sample at a time.
        """
        for epoch in range(self.epochs):
            for xi, target in zip(X, y):
                prediction = self.activate(np.dot(xi, self.weights) + self.bias)
                error = target - prediction
                self.weights += self.learning_rate * error * xi
                self.bias += self.learning_rate * error

    # BUG FIX: the original defined this identical `predict` twice; the second
    # definition silently overrode the first. Only one copy is kept.
    def predict(self, X):
        """Return activated outputs for ``X`` (n_samples, input_size)."""
        weighted_sum = np.dot(X, self.weights) + self.bias
        return self.activate(weighted_sum)
44
-
45
-
46
-
47
def save_model(perceptron, path='perceptron_model.pkl'):
    """Pickle a trained perceptron to disk.

    Args:
        perceptron: Any picklable model object.
        path: Destination file path; defaults to the original hard-coded
            filename so existing callers are unaffected.
    """
    with open(path, 'wb') as model_file:
        pickle.dump(perceptron, model_file)
50
-
51
# --- Script: train a perceptron sentiment classifier on IMDB and evaluate ---

# Load the IMDB dataset, keeping only the 5000 most frequent words.
top_words = 5000
(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=top_words)

# BUG FIX: imdb.load_data already returns binary sentiment labels
# (0 = negative, 1 = positive). The original `np.where(y >= 7, 1, 0)`
# treated them as 1-10 star ratings and collapsed *every* label to 0,
# making training and evaluation meaningless. Use the labels as-is.
y_train = np.array(y_train)
y_test = np.array(y_test)

# Pad/truncate every review to a fixed length so X is a rectangular array.
# NOTE(review): this pads with raw word *indices*, not normalized features —
# a linear perceptron over raw indices is a very rough baseline; confirm
# this is intentional.
max_review_length = 500
X_train = sequence.pad_sequences(X_train, maxlen=max_review_length)
X_test = sequence.pad_sequences(X_test, maxlen=max_review_length)

# Create and train the perceptron (sigmoid activation by default).
input_size = X_train.shape[1]
learning_rate = 0.01
perceptron = Perceptron(input_size=input_size, epochs=10, learning_rate=learning_rate)
perceptron.fit(X_train, y_train)

# Persist the trained model for later reuse.
save_model(perceptron)

# Predict sigmoid probabilities, then threshold at 0.5 for class labels.
pred = perceptron.predict(X_test)
threshold = 0.5
binary_predictions = (pred > threshold).astype(int)

# Report accuracy and per-class precision/recall/F1.
print(f"Accuracy: {accuracy_score(y_test, binary_predictions)}")
report = classification_report(y_test, binary_predictions, digits=2)
print(report)