import numpy as np


def step_function(x):
    """Heaviside step activation: return 1 if x >= 0, else 0."""
    return 1 if x >= 0 else 0


class Perceptron:
    """Single-layer perceptron with a step activation.

    Learns a linear decision boundary with the classic perceptron rule:
        w <- w + lr * (target - prediction) * x
    where x is the bias-augmented input vector. Converges only if the
    training data are linearly separable.
    """

    def __init__(self, input_size, learning_rate=0.1):
        # weights[0] is the bias weight; weights[1:] map to the inputs.
        self.weights = np.zeros(input_size + 1)
        self.lr = learning_rate

    def predict(self, x):
        """Return the predicted class (0 or 1) for raw input vector x."""
        x = np.insert(x, 0, 1)  # prepend the constant bias input
        weighted_sum = np.dot(self.weights, x)
        return step_function(weighted_sum)

    def train(self, X, y, epochs=10):
        """Fit the weights on samples X against binary targets y.

        Runs `epochs` full passes over the data, applying the perceptron
        update after every sample (online learning).
        """
        for _ in range(epochs):
            for xi, target in zip(X, y):
                # predict() bias-augments xi itself, so pass the raw sample
                # (the original inserted the bias only to strip it again).
                error = target - self.predict(xi)
                # One vectorized update covers bias and input weights alike.
                self.weights += self.lr * error * np.insert(xi, 0, 1)


if __name__ == "__main__":
    # Example: learn the AND logic gate.
    X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
    y = np.array([0, 0, 0, 1])

    model = Perceptron(input_size=2)
    model.train(X, y)

    # Test
    for x in X:
        print(f"Input: {x}, Output: {model.predict(x)}")