Spaces:
Running
Running
import io
import logging

import gradio as gr
import matplotlib.pyplot as plt
import numexpr
import numpy as np
from PIL import Image

from optimisers import get_gradient_1d, get_hessian_1d, get_optimizer_trajectory_1d

# Configure root logging once at import time.
logging.basicConfig(
    level=logging.INFO,  # minimum level to capture (DEBUG, INFO, WARNING, ERROR, CRITICAL)
    format="%(asctime)s [%(levelname)s] %(message)s",
)
logger = logging.getLogger("ELVIS")
class Univariate:
    """Interactive visualiser for 1-D optimisation trajectories.

    Pre-computes an optimiser path over a user-supplied function of ``x``
    and renders one plot frame per step for display in a Gradio tab.
    """

    DEFAULT_UNIVARIATE = "x ** 2"
    DEFAULT_INIT_X = 0.5

    def __init__(self, width, height):
        """Initialise state, compute the default trajectory, and render frames.

        Args:
            width: Canvas width for the plot area.
            height: Canvas height for the plot area.
        """
        self.canvas_width = width
        self.canvas_height = height
        # Optimiser configuration defaults.
        self.optimiser_type = "Gradient Descent"
        self.learning_rate = 0.1
        self.momentum = 0
        self.num_steps = 20
        # Function under optimisation and its starting point.
        self.function = self.DEFAULT_UNIVARIATE
        self.initial_x = self.DEFAULT_INIT_X
        # Pre-compute the full optimisation path for the defaults.
        self.trajectory_x, self.trajectory_y = get_optimizer_trajectory_1d(
            self.function,
            self.initial_x,
            self.optimiser_type,
            self.learning_rate,
            self.momentum,
            self.num_steps,
        )
        self.trajectory_idx = 0
        self.plots = []
        self.generate_plots()
| def generate_plots(self): | |
| self.plots.clear() | |
| fig, ax = plt.subplots() | |
| for idx in range(self.num_steps): | |
| traj_x_min = np.min(self.trajectory_x[:idx + 1]) | |
| traj_x_max = np.max(self.trajectory_x[:idx + 1]) | |
| x_radius = np.maximum(np.abs(traj_x_min), np.abs(traj_x_max)) | |
| if x_radius > 1: | |
| x = np.linspace(-1.2 * x_radius, 1.2 * x_radius, 100) | |
| else: | |
| x = np.linspace(-1, 1, 100) | |
| try: | |
| y = numexpr.evaluate(self.function, local_dict={'x': x}) | |
| except Exception as e: | |
| logger.error("Error evaluating function '%s': %s", function, e) | |
| y = np.zeros_like(x) | |
| ax.clear() | |
| ax.plot(x, y) | |
| ax.set_xlabel("x") | |
| ax.set_ylabel("f(x)") | |
| ax.plot(self.trajectory_x[:idx + 1], self.trajectory_y[:idx + 1], marker='o', color='indianred') | |
| ax.plot(self.trajectory_x[idx], self.trajectory_y[idx], marker='o', color='red') | |
| buf = io.BytesIO() | |
| fig.savefig(buf, format="png", bbox_inches="tight", pad_inches=0) | |
| plt.close(fig) | |
| buf.seek(0) | |
| img = Image.open(buf) | |
| # Append the generated plot to the list | |
| self.plots.append(img) | |
| def update_plot(self): | |
| plot = self.plots[self.trajectory_idx] | |
| self.univariate_plot = plot | |
| return plot | |
| def update_optimiser_type(self, optimiser_type): | |
| self.optimiser_type = optimiser_type | |
| def update_trajectory(self): | |
| trajectory_x, trajectory_y = get_optimizer_trajectory_1d( | |
| self.function, | |
| self.initial_x, | |
| self.optimiser_type, | |
| self.learning_rate, | |
| self.momentum, | |
| self.num_steps, | |
| ) | |
| self.trajectory_x = trajectory_x | |
| self.trajectory_y = trajectory_y | |
| def update_trajectory_slider(self, trajectory_idx): | |
| self.trajectory_idx = trajectory_idx | |
| def update_learning_rate(self, learning_rate): | |
| self.learning_rate = learning_rate | |
| def update_initial_x(self, initial_x): | |
| self.initial_x = initial_x | |
| def update_function(self, function): | |
| self.function = function | |
| def show_relevant_params(self, optimiser_type): | |
| if optimiser_type == "Gradient Descent": | |
| learning_rate = gr.update(visible=True) | |
| hessian = gr.update(visible=False) | |
| momentum = gr.update(visible=True) | |
| else: | |
| learning_rate = gr.update(visible=False) | |
| hessian = gr.update(visible=True) | |
| momentum = gr.update(visible=False) | |
| return hessian, learning_rate, momentum | |
| def handle_trajectory_change(self): | |
| self.update_trajectory() | |
| self.generate_plots() | |
| self.handle_slider_change(0) # reset slider | |
| self.update_plot() | |
| def handle_optimiser_type_change(self, optimiser_type): | |
| self.update_optimiser_type(optimiser_type) | |
| self.handle_trajectory_change() | |
| hessian_update, learning_rate_update, momentum_update = self.show_relevant_params(optimiser_type) | |
| return self.trajectory_idx, hessian_update, learning_rate_update, momentum_update, self.univariate_plot | |
| def handle_learning_rate_change(self, learning_rate): | |
| self.update_learning_rate(learning_rate) | |
| self.handle_trajectory_change() | |
| return self.trajectory_idx, self.univariate_plot | |
| def handle_momentum_change(self, momentum): | |
| self.momentum = momentum | |
| self.handle_trajectory_change() | |
| return self.trajectory_idx, self.univariate_plot | |
| def handle_slider_change(self, trajectory_idx): | |
| self.update_trajectory_slider(trajectory_idx) | |
| self.update_plot() | |
| return self.univariate_plot | |
| def handle_trajectory_button(self): | |
| if self.trajectory_idx < self.num_steps - 1: | |
| self.trajectory_idx += 1 | |
| # plot is updated from slider changing | |
| return self.trajectory_idx | |
| def handle_initial_x_change(self, initial_x): | |
| self.update_initial_x(initial_x) | |
| self.handle_trajectory_change() | |
| return self.trajectory_idx, self.univariate_plot | |
| def handle_function_change(self, function): | |
| self.update_function(function) | |
| self.handle_trajectory_change() | |
| gradient = f"{get_gradient_1d(function)}" | |
| hessian = f"{get_hessian_1d(function)}" | |
| return self.trajectory_idx, gradient, hessian, self.univariate_plot | |
| def reset(self): | |
| self.optimiser_type = "Gradient Descent" | |
| self.learning_rate = 0.1 | |
| self.num_steps = 20 | |
| self.function = self.DEFAULT_UNIVARIATE | |
| self.initial_x = self.DEFAULT_INIT_X | |
| self.trajectory_x, self.trajectory_y = get_optimizer_trajectory_1d( | |
| self.DEFAULT_UNIVARIATE, | |
| self.DEFAULT_INIT_X, | |
| self.optimiser_type, | |
| self.learning_rate, | |
| self.momentum, | |
| self.num_steps, | |
| ) | |
| self.trajectory_idx = 0 | |
| self.plots = [] | |
| self.generate_plots() | |
    def build(self):
        """Construct the Univariate tab of the Gradio UI and wire its events.

        NOTE(review): assumes this is called inside an active ``gr.Blocks``
        context so that component creation and event registration are valid —
        confirm against the caller.
        """
        with gr.Tab("Univariate"):
            with gr.Row():
                with gr.Column(scale=2):
                    # Main plot pane, seeded with the frame for the current step.
                    self.univariate_plot = gr.Image(
                        value=self.update_plot(),
                        container=True,
                    )
                with gr.Column(scale=1):
                    with gr.Tab("Settings"):
                        # Function expression plus its read-only derivatives.
                        function = gr.Textbox(label="Function", value=self.DEFAULT_UNIVARIATE, interactive=True)
                        gradient = gr.Textbox(
                            label="Derivative",
                            value=f"{get_gradient_1d(self.DEFAULT_UNIVARIATE)}",
                            interactive=False,
                        )
                        hessian = gr.Textbox(
                            label="Second Derivative",
                            value=f"{get_hessian_1d(self.DEFAULT_UNIVARIATE)}",
                            interactive=False,
                            # Hidden by default; toggled visible for Newton via
                            # show_relevant_params.
                            visible=False,
                        )
                        optimiser_type = gr.Dropdown(
                            label="Optimiser",
                            choices=["Gradient Descent", "Newton"],
                            value="Gradient Descent",
                            interactive=True,
                        )
                        initial_x = gr.Number(label="Initial X", value=self.DEFAULT_INIT_X, interactive=True)
                        with gr.Row():
                            learning_rate = gr.Number(label="Learning Rate", value=self.learning_rate, interactive=True)
                            momentum = gr.Number(label="Momentum", value=self.momentum, interactive=True)
                    with gr.Tab("Optimize"):
                        # Step slider spans the pre-rendered frames [0, num_steps - 1].
                        trajectory_slider = gr.Slider(
                            label="Optimisation Step",
                            minimum=0,
                            maximum=self.num_steps - 1,
                            step=1,
                            value=0,
                            interactive=True,
                        )
                        trajectory_button = gr.Button("Optimisation Step")
        # Event wiring: every setting change recomputes the trajectory and
        # resets the slider/plot; the button only moves the slider, whose own
        # change event then refreshes the plot.
        function.submit(self.handle_function_change, inputs=[function], outputs=[trajectory_slider, gradient, hessian, self.univariate_plot])
        initial_x.submit(self.handle_initial_x_change, inputs=[initial_x], outputs=[trajectory_slider, self.univariate_plot])
        learning_rate.submit(self.handle_learning_rate_change, inputs=[learning_rate], outputs=[trajectory_slider, self.univariate_plot])
        momentum.submit(self.handle_momentum_change, inputs=[momentum], outputs=[trajectory_slider, self.univariate_plot])
        optimiser_type.change(
            self.handle_optimiser_type_change,
            inputs=[optimiser_type],
            outputs=[trajectory_slider, hessian, learning_rate, momentum, self.univariate_plot]
        )
        trajectory_slider.change(self.handle_slider_change, inputs=[trajectory_slider], outputs=[self.univariate_plot])
        trajectory_button.click(self.handle_trajectory_button, outputs=[trajectory_slider])