|
|
import numpy as np |
|
|
from cnn_model import build_cnn_model |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def objective_function(x, X_train, y_train):
    """Fitness for the optimizer: train a CNN with the hyperparameters in *x*
    and return ``1 - final_validation_accuracy`` (lower is better).

    x layout: [filter1, filter2, filter3, learning_rate, dropout] —
    the first three are cast to int, the last two to float.
    """
    # Decode the candidate solution into concrete hyperparameters.
    f1, f2, f3 = (int(v) for v in x[:3])
    lr = float(x[3])
    drop = float(x[4])

    # Build a fresh model for this candidate; input shape and class count
    # are taken directly from the training data.
    model = build_cnn_model(
        input_shape=(X_train.shape[1], X_train.shape[2]),
        num_classes=y_train.shape[1],
        filter1=f1,
        filter2=f2,
        filter3=f3,
        learning_rate=lr,
        dropout=drop,
    )

    # Short training run (3 epochs) — enough to rank candidates cheaply.
    history = model.fit(
        X_train,
        y_train,
        validation_split=0.2,
        epochs=3,
        batch_size=32,
        verbose=0,
    )

    # Minimization objective: 1 - last recorded validation accuracy.
    return 1 - history.history['val_accuracy'][-1]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def devsca(objf, lb, ub, dim, N, T, X_train, y_train):
    """Sine Cosine Algorithm (SCA)-style optimizer minimizing *objf*.

    Parameters
    ----------
    objf : callable
        Objective ``objf(x, X_train, y_train) -> float`` to minimize.
    lb, ub : sequence of float
        Per-dimension lower/upper bounds (length *dim*).
    dim : int
        Number of decision variables.
    N : int
        Population size.
    T : int
        Number of iterations.
    X_train, y_train :
        Passed through to *objf* unchanged.

    Returns
    -------
    (best_X, best_fit, convergence_curve) :
        Best position found, its objective value, and the best-so-far
        objective value recorded at each iteration (length *T*).
    """
    lb_arr = np.asarray(lb, dtype=float)
    ub_arr = np.asarray(ub, dtype=float)

    # Random initial population uniformly within [lb, ub].
    X = np.random.uniform(0, 1, (N, dim)) * (ub_arr - lb_arr) + lb_arr
    fitness = np.array([objf(ind, X_train, y_train) for ind in X])
    best_idx = np.argmin(fitness)
    best_X = X[best_idx].copy()
    best_fit = fitness[best_idx]
    convergence_curve = []

    # Exploration/exploitation balance r1 decays linearly 2 -> 0.
    # Precomputed once instead of rebuilding the schedule every iteration.
    r1_schedule = np.linspace(2, 0, T)

    for t in range(T):
        r1 = r1_schedule[t]
        for i in range(N):
            for j in range(dim):
                r2 = 2 * np.pi * np.random.rand()   # angle for sin/cos term
                r3 = 2 * np.random.rand()           # weight on the best position
                r4 = np.random.rand()               # sine-vs-cosine switch
                if r4 < 0.5:
                    X[i, j] = X[i, j] + r1 * np.sin(r2) * abs(r3 * best_X[j] - X[i, j])
                else:
                    X[i, j] = X[i, j] + r1 * np.cos(r2) * abs(r3 * best_X[j] - X[i, j])

            # Keep the candidate inside the search bounds before evaluating.
            X[i] = np.clip(X[i], lb_arr, ub_arr)

            fit = objf(X[i], X_train, y_train)
            fitness[i] = fit  # keep the fitness array in sync with X
            if fit < best_fit:
                best_fit = fit
                best_X = X[i].copy()

        convergence_curve.append(best_fit)
        print(f"Iteration {t+1}/{T} | Best Accuracy: {1 - best_fit:.4f}")

    return best_X, best_fit, convergence_curve
|
|
|