| | import numpy as np
|
| | from sklearn.datasets import fetch_openml
|
| | from sklearn.model_selection import train_test_split
|
| | import numpy
|
| | import sklearn
|
| | import time
|
| | from datetime import timedelta
|
| | import pickle
|
| | import os
|
| | from PIL import Image
|
| | import tkinter as tk
|
| | from tkinter import filedialog
|
| |
|
# Log library versions at startup so training runs record the environment.
print("NumPy版本:", numpy.__version__)
print("Scikit-learn版本:", sklearn.__version__)
|
| |
|
| | class NeuralNetwork:
|
| | def __init__(self, input_size, hidden_size1, hidden_size2, output_size):
|
| |
|
| |
|
| |
|
| |
|
| | self.weights1 = np.random.randn(input_size, hidden_size1) * 0.1
|
| | self.bias1 = np.zeros((1, hidden_size1))
|
| | self.weights2 = np.random.randn(hidden_size1, hidden_size2) * 0.1
|
| | self.bias2 = np.zeros((1, hidden_size2))
|
| | self.weights3 = np.random.randn(hidden_size2, output_size) * 0.1
|
| | self.bias3 = np.zeros((1, output_size))
|
| |
|
| | def sigmoid(self, x):
|
| | return 1 / (1 + np.exp(-x))
|
| |
|
| | def sigmoid_derivative(self, x):
|
| | return x * (1 - x)
|
| |
|
| | def softmax(self, x):
|
| | exp_x = np.exp(x - np.max(x, axis=1, keepdims=True))
|
| | return exp_x / np.sum(exp_x, axis=1, keepdims=True)
|
| |
|
| | def forward(self, X):
|
| |
|
| | self.layer1 = self.sigmoid(np.dot(X, self.weights1) + self.bias1)
|
| | self.layer2 = self.sigmoid(np.dot(self.layer1, self.weights2) + self.bias2)
|
| |
|
| | self.output = self.softmax(np.dot(self.layer2, self.weights3) + self.bias3)
|
| | return self.output
|
| |
|
| | def backward(self, X, y, learning_rate):
|
| | batch_size = X.shape[0]
|
| |
|
| |
|
| |
|
| | delta3 = self.output - y
|
| |
|
| |
|
| | delta2 = np.dot(delta3, self.weights3.T) * self.sigmoid_derivative(self.layer2)
|
| |
|
| |
|
| | delta1 = np.dot(delta2, self.weights2.T) * self.sigmoid_derivative(self.layer1)
|
| |
|
| |
|
| | self.weights3 -= learning_rate * np.dot(self.layer2.T, delta3) / batch_size
|
| | self.weights2 -= learning_rate * np.dot(self.layer1.T, delta2) / batch_size
|
| | self.weights1 -= learning_rate * np.dot(X.T, delta1) / batch_size
|
| |
|
| |
|
| |
|
| |
|
| | self.bias3 -= learning_rate * np.sum(delta3, axis=0, keepdims=True) / batch_size
|
| | self.bias2 -= learning_rate * np.sum(delta2, axis=0, keepdims=True) / batch_size
|
| | self.bias1 -= learning_rate * np.sum(delta1, axis=0, keepdims=True) / batch_size
|
| |
|
| | def save_model(self, filename):
|
| | """保存模型参数"""
|
| | model_params = {
|
| | 'weights1': self.weights1,
|
| | 'weights2': self.weights2,
|
| | 'weights3': self.weights3,
|
| | 'bias1': self.bias1,
|
| | 'bias2': self.bias2,
|
| | 'bias3': self.bias3
|
| | }
|
| | with open(filename, 'wb') as f:
|
| | pickle.dump(model_params, f)
|
| |
|
| | def load_model(self, filename):
|
| | """加载模型参数"""
|
| | with open(filename, 'rb') as f:
|
| | model_params = pickle.load(f)
|
| | self.weights1 = model_params['weights1']
|
| | self.weights2 = model_params['weights2']
|
| | self.weights3 = model_params['weights3']
|
| | self.bias1 = model_params['bias1']
|
| | self.bias2 = model_params['bias2']
|
| | self.bias3 = model_params['bias3']
|
| |
|
| | def predict(self, X):
|
| | """预测单个图像"""
|
| |
|
| | if len(X.shape) == 1:
|
| | X = X.reshape(1, -1)
|
| |
|
| | X = X / 255.0
|
| |
|
| | output = self.forward(X)
|
| |
|
| | return np.argmax(output, axis=1)[0]
|
| |
|
def preprocess_data():
    """Download MNIST, scale pixels to [0, 1], one-hot labels, split 80/20.

    Returns (X_train, X_test, y_train, y_test); y_* are (n, 10) one-hot
    float arrays. random_state=42 keeps the split reproducible.
    """
    X, y = fetch_openml('mnist_784', version=1, return_X_y=True, as_frame=False)

    # Scale raw 0-255 pixel values into [0, 1].
    X = X / 255.0

    # Vectorized one-hot encoding (replaces a Python-level loop over ~70k rows).
    labels = y.astype(int)
    y_onehot = np.zeros((labels.shape[0], 10))
    y_onehot[np.arange(labels.shape[0]), labels] = 1

    X_train, X_test, y_train, y_test = train_test_split(
        X, y_onehot, test_size=0.2, random_state=42)
    return X_train, X_test, y_train, y_test
|
| |
|
def main():
    """Train the network on MNIST with mini-batch SGD and save the weights.

    Prints batch progress, per-epoch timing, and train/test accuracy every
    5 epochs; the final model is written to 'mnist_model.pkl'.
    """
    # Hyper-parameters.
    input_size, hidden_size1, hidden_size2 = 784, 30, 60
    output_size = 10
    learning_rate = 0.1
    epochs = 50
    batch_size = 128

    X_train, X_test, y_train, y_test = preprocess_data()
    nn = NeuralNetwork(input_size, hidden_size1, hidden_size2, output_size)

    print("开始训练...")
    start_time = time.time()
    total_batches = len(X_train) // batch_size

    for epoch in range(epochs):
        epoch_start = time.time()

        # Reshuffle the training set at the start of each epoch.
        order = np.random.permutation(len(X_train))
        X_train, y_train = X_train[order], y_train[order]

        for batch_idx, start in enumerate(range(0, len(X_train), batch_size)):
            # Progress line refreshed every 20 batches.
            if batch_idx % 20 == 0:
                print(f'\rEpoch {epoch + 1}/{epochs} - 批次进度: {batch_idx}/{total_batches}', end='')

            X_batch = X_train[start:start + batch_size]
            y_batch = y_train[start:start + batch_size]

            nn.forward(X_batch)
            nn.backward(X_batch, y_batch, learning_rate)

        epoch_time = time.time() - epoch_start

        # Evaluate on both splits every 5 epochs.
        if (epoch + 1) % 5 == 0:
            train_acc = np.mean(
                np.argmax(nn.forward(X_train), axis=1) == np.argmax(y_train, axis=1))
            test_acc = np.mean(
                np.argmax(nn.forward(X_test), axis=1) == np.argmax(y_test, axis=1))

            print(f'\nEpoch {epoch + 1}/{epochs}')
            print(f'每轮用时: {timedelta(seconds=int(epoch_time))}')
            print(f'训练集准确率: {train_acc:.4f}')
            print(f'测试集准确率: {test_acc:.4f}')

    total_time = time.time() - start_time
    print(f'\n训练完成!总用时: {timedelta(seconds=int(total_time))}')

    print("保存模型...")
    nn.save_model('mnist_model.pkl')
|
| |
|
| |
|
def predict_digit(image_data):
    """Classify one handwritten digit using the saved model.

    image_data: flat 784-element numpy array (28x28 raw pixels).
    Returns the predicted class index.
    """
    # Rebuild the architecture used in main(), then restore its weights.
    model = NeuralNetwork(784, 30, 60, 10)
    model.load_model('mnist_model.pkl')
    return model.predict(image_data)
|
| |
|
def load_image(image_path):
    """Load an image file and flatten it to a 784-element array.

    image_path: path to the image file.
    Returns the processed pixel data as a flat (784,) numpy array.
    """
    # Grayscale, then downsample to the 28x28 MNIST resolution.
    img = Image.open(image_path).convert('L').resize((28, 28))

    # Invert so strokes are bright on dark, matching MNIST.
    # NOTE(review): assumes a dark-digit-on-light-background input — confirm.
    pixels = 255 - np.array(img)

    return pixels.reshape(784)
|
| |
|
def select_image():
    """Show a file-open dialog and return the chosen path, or None."""
    try:
        root = tk.Tk()
        root.withdraw()                    # hide the empty main window
        root.attributes('-topmost', True)  # keep the dialog in front

        chosen = filedialog.askopenfilename(
            title='选择手写数字图片',
            filetypes=[
                ('图片文件', '*.png *.jpg *.jpeg *.bmp *.gif'),
                ('所有文件', '*.*'),
            ],
        )

        root.destroy()
        # askopenfilename yields an empty value on cancel — map it to None.
        return chosen or None
    except Exception as e:
        print(f"选择文件时出错:{str(e)}")
        return None
|
| |
|
def select_image_alternative():
    """Fallback picker: read a file path from stdin; None if it doesn't exist."""
    print("\n请直接输入图片文件的完整路径:")
    path = input().strip()
    # Guard clause: reject paths that don't point at an existing file.
    if not os.path.exists(path):
        print("文件不存在!")
        return None
    return path
|
| |
|
| |
|
# Entry point: either retrain the model or run interactive prediction.
if __name__ == "__main__":
    if input("是否需要重新训练模型?(y/n): ").lower() == 'y':
        main()
    else:
        # Prediction requires a previously saved model file.
        if not os.path.exists('mnist_model.pkl'):
            print("\n错误:找不到模型文件 'mnist_model.pkl'")
            print("请先训练模型(输入 'y' 进行训练)再进行预测。")
            exit()

        try:
            print("\n选择预测模式:")
            print("1. 使用MNIST测试集样本")
            print("2. 使用本地图片文件")
            choice = input("请选择(1/2):")

            # Rebuild the architecture used in main() and restore its weights.
            nn = NeuralNetwork(784, 30, 60, 10)
            nn.load_model('mnist_model.pkl')

            if choice == '1':
                # Demo mode: predict the first 10 samples of the raw dataset.
                # NOTE(review): these may overlap the training split — confirm
                # if this is meant to measure held-out accuracy.
                print("\n加载测试数据...")
                X, y = fetch_openml('mnist_784', version=1, return_X_y=True, as_frame=False)
                test_samples = X[:10]
                true_labels = y[:10]

                print("\n预测演示:")
                for i, (sample, true_label) in enumerate(zip(test_samples, true_labels)):
                    predicted = nn.predict(sample)
                    print(f"样本 {i+1}: 预测值 = {predicted}, 实际值 = {true_label}")

            elif choice == '2':
                # Local-image mode: GUI file dialog with a stdin fallback.
                print("\n准备打开文件选择对话框...")
                try:
                    print("\n请在弹出的对话框中选择图片文件...")
                    image_path = select_image()
                    if not image_path:
                        # Dialog failed or was cancelled — fall back to stdin.
                        print("\n使用备选方法...")
                        image_path = select_image_alternative()
                        if not image_path:
                            print("未能获取有效的图片文件路径")
                            exit()

                    print(f"已选择文件:{image_path}")
                    img_data = load_image(image_path)
                    predicted = nn.predict(img_data)
                    print(f"\n预测结果:这个数字是 {predicted}")
                except Exception as e:
                    # Dialog-specific failures (e.g. headless environment).
                    print(f"\n选择文件过程中发生错误:{str(e)}")
                    print("请确保系统支持文件对话框操作。")

            else:
                print("无效的选择")

        # Top-level boundary: report any prediction-path failure to the user.
        except Exception as e:
            print(f"\n发生错误:{str(e)}")
            print("请确保已经完成模型训练,并且模型文件正确保存。")
|
| |
|