joel-woodfield committed
Commit 303a85e · 1 Parent(s): 9357e05

Change to static sdk

Dockerfile DELETED
@@ -1,20 +0,0 @@
- # Step 1: Build your app
- FROM node:20 AS builder
- WORKDIR /app
- COPY frontends/react/package*.json ./frontends/react/
- RUN cd frontends/react && npm install
- COPY . .
- RUN cd frontends/react && npm run build
-
- # Step 2: Serve it with npx
- FROM node:20-slim
- WORKDIR /app
- # Only copy the built files from the builder
- COPY --from=builder /app/frontends/react/dist ./dist
-
- # Install the server tool
- RUN npm install -g serve
-
- # Serve the 'dist' folder on the correct Hugging Face port
- # -s flag handles Single Page App routing (important for React)
- CMD ["serve", "-s", "dist", "-l", "7860"]
README.md CHANGED
@@ -3,7 +3,8 @@ title: Optimization
  emoji: 🚀
  colorFrom: purple
  colorTo: indigo
- sdk: docker
+ sdk: static
+ app_file: dist/index.html
  pinned: false
  ---

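Reassembled from the hunk above, the Space's full front matter now reads as follows; with `sdk: static`, Hugging Face serves the file named by `app_file` directly instead of building and running a container:

```yaml
---
title: Optimization
emoji: 🚀
colorFrom: purple
colorTo: indigo
sdk: static
app_file: dist/index.html
pinned: false
---
```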
dist/assets/index-BE9C_h4C.css ADDED
@@ -0,0 +1 @@
+ @layer properties{@supports (((-webkit-hyphens:none)) and (not (margin-trim:inline))) or ((-moz-orient:inline) and (not (color:rgb(from red r g b)))){*,:before,:after,::backdrop{--tw-space-y-reverse:0;--tw-border-style:solid;--tw-font-weight:initial;--tw-tracking:initial}}}@layer theme{:root,:host{--font-sans:ui-sans-serif,system-ui,sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji";--font-mono:ui-monospace,SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",monospace;--color-orange-200:oklch(90.1% .076 70.697);--color-orange-300:oklch(83.7% .128 66.29);--color-orange-500:oklch(70.5% .213 47.604);--color-blue-600:oklch(54.6% .245 262.881);--color-slate-50:oklch(98.4% .003 247.858);--color-slate-200:oklch(92.9% .013 255.508);--color-slate-800:oklch(27.9% .041 260.031);--color-gray-100:oklch(96.7% .003 264.542);--color-gray-200:oklch(92.8% .006 264.531);--color-gray-300:oklch(87.2% .01 258.338);--color-gray-700:oklch(37.3% .034 259.733);--color-gray-950:oklch(13% .028 261.692);--color-white:#fff;--spacing:.25rem;--text-sm:.875rem;--text-sm--line-height:calc(1.25/.875);--text-xl:1.25rem;--text-xl--line-height:calc(1.75/1.25);--font-weight-semibold:600;--tracking-tight:-.025em;--animate-spin:spin 1s linear infinite;--default-font-family:var(--font-sans);--default-mono-font-family:var(--font-mono)}}@layer base{*,:after,:before,::backdrop{box-sizing:border-box;border:0 solid;margin:0;padding:0}::file-selector-button{box-sizing:border-box;border:0 solid;margin:0;padding:0}html,:host{-webkit-text-size-adjust:100%;tab-size:4;line-height:1.5;font-family:var(--default-font-family,ui-sans-serif,system-ui,sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji");font-feature-settings:var(--default-font-feature-settings,normal);font-variation-settings:var(--default-font-variation-settings,normal);-webkit-tap-highlight-color:transparent}hr{height:0;color:inherit;border-top-width:1px}abbr:where([title]){-webkit-text-decoration:underline dotted;text-decoration:underline dotted}h1,h2,h3,h4,h5,h6{font-size:inherit;font-weight:inherit}a{color:inherit;-webkit-text-decoration:inherit;text-decoration:inherit}b,strong{font-weight:bolder}code,kbd,samp,pre{font-family:var(--default-mono-font-family,ui-monospace,SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",monospace);font-feature-settings:var(--default-mono-font-feature-settings,normal);font-variation-settings:var(--default-mono-font-variation-settings,normal);font-size:1em}small{font-size:80%}sub,sup{vertical-align:baseline;font-size:75%;line-height:0;position:relative}sub{bottom:-.25em}sup{top:-.5em}table{text-indent:0;border-color:inherit;border-collapse:collapse}:-moz-focusring{outline:auto}progress{vertical-align:baseline}summary{display:list-item}ol,ul,menu{list-style:none}img,svg,video,canvas,audio,iframe,embed,object{vertical-align:middle;display:block}img,video{max-width:100%;height:auto}button,input,select,optgroup,textarea{font:inherit;font-feature-settings:inherit;font-variation-settings:inherit;letter-spacing:inherit;color:inherit;opacity:1;background-color:#0000;border-radius:0}::file-selector-button{font:inherit;font-feature-settings:inherit;font-variation-settings:inherit;letter-spacing:inherit;color:inherit;opacity:1;background-color:#0000;border-radius:0}:where(select:is([multiple],[size])) optgroup{font-weight:bolder}:where(select:is([multiple],[size])) optgroup option{padding-inline-start:20px}::file-selector-button{margin-inline-end:4px}::placeholder{opacity:1}@supports (not ((-webkit-appearance:-apple-pay-button))) or (contain-intrinsic-size:1px){::placeholder{color:currentColor}@supports (color:color-mix(in lab,red,red)){::placeholder{color:color-mix(in oklab,currentcolor 50%,transparent)}}}textarea{resize:vertical}::-webkit-search-decoration{-webkit-appearance:none}::-webkit-date-and-time-value{min-height:1lh;text-align:inherit}::-webkit-datetime-edit{display:inline-flex}::-webkit-datetime-edit-fields-wrapper{padding:0}::-webkit-datetime-edit{padding-block:0}::-webkit-datetime-edit-year-field{padding-block:0}::-webkit-datetime-edit-month-field{padding-block:0}::-webkit-datetime-edit-day-field{padding-block:0}::-webkit-datetime-edit-hour-field{padding-block:0}::-webkit-datetime-edit-minute-field{padding-block:0}::-webkit-datetime-edit-second-field{padding-block:0}::-webkit-datetime-edit-millisecond-field{padding-block:0}::-webkit-datetime-edit-meridiem-field{padding-block:0}::-webkit-calendar-picker-indicator{line-height:1}:-moz-ui-invalid{box-shadow:none}button,input:where([type=button],[type=reset],[type=submit]){appearance:button}::file-selector-button{appearance:button}::-webkit-inner-spin-button{height:auto}::-webkit-outer-spin-button{height:auto}[hidden]:where(:not([hidden=until-found])){display:none!important}}@layer components;@layer utilities{.fixed{position:fixed}.relative{position:relative}.inset-0{inset:calc(var(--spacing)*0)}.z-50{z-index:50}.mb-4{margin-bottom:calc(var(--spacing)*4)}.flex{display:flex}.grid{display:grid}.h-16{height:calc(var(--spacing)*16)}.h-dvh{height:100dvh}.h-full{height:100%}.w-16{width:calc(var(--spacing)*16)}.w-full{width:100%}.animate-spin{animation:var(--animate-spin)}.cursor-pointer{cursor:pointer}.grid-cols-2{grid-template-columns:repeat(2,minmax(0,1fr))}.grid-cols-\[2fr_1fr\]{grid-template-columns:2fr 1fr}.flex-col{flex-direction:column}.items-center{align-items:center}.justify-center{justify-content:center}.gap-1{gap:calc(var(--spacing)*1)}.gap-2{gap:calc(var(--spacing)*2)}.gap-4{gap:calc(var(--spacing)*4)}:where(.space-y-6>:not(:last-child)){--tw-space-y-reverse:0;margin-block-start:calc(calc(var(--spacing)*6)*var(--tw-space-y-reverse));margin-block-end:calc(calc(var(--spacing)*6)*calc(1 - var(--tw-space-y-reverse)))}.rounded{border-radius:.25rem}.rounded-full{border-radius:3.40282e38px}.border{border-style:var(--tw-border-style);border-width:1px}.border-4{border-style:var(--tw-border-style);border-width:4px}.border-b-2{border-bottom-style:var(--tw-border-style);border-bottom-width:2px}.border-gray-300{border-color:var(--color-gray-300)}.border-orange-500{border-color:var(--color-orange-500)}.border-slate-200{border-color:var(--color-slate-200)}.border-t-blue-600{border-top-color:var(--color-blue-600)}.bg-gray-100{background-color:var(--color-gray-100)}.bg-orange-200{background-color:var(--color-orange-200)}.bg-slate-50{background-color:var(--color-slate-50)}.bg-white{background-color:var(--color-white)}.p-2{padding:calc(var(--spacing)*2)}.p-4{padding:calc(var(--spacing)*4)}.px-5{padding-inline:calc(var(--spacing)*5)}.py-2{padding-block:calc(var(--spacing)*2)}.text-center{text-align:center}.text-sm{font-size:var(--text-sm);line-height:var(--tw-leading,var(--text-sm--line-height))}.text-xl{font-size:var(--text-xl);line-height:var(--tw-leading,var(--text-xl--line-height))}.font-semibold{--tw-font-weight:var(--font-weight-semibold);font-weight:var(--font-weight-semibold)}.tracking-tight{--tw-tracking:var(--tracking-tight);letter-spacing:var(--tracking-tight)}.text-gray-700{color:var(--color-gray-700)}.text-gray-950{color:var(--color-gray-950)}.text-orange-500{color:var(--color-orange-500)}.text-slate-800{color:var(--color-slate-800)}@media(hover:hover){.hover\:bg-gray-200:hover{background-color:var(--color-gray-200)}.hover\:bg-orange-300:hover{background-color:var(--color-orange-300)}}}@property --tw-space-y-reverse{syntax:"*";inherits:false;initial-value:0}@property --tw-border-style{syntax:"*";inherits:false;initial-value:solid}@property --tw-font-weight{syntax:"*";inherits:false}@property --tw-tracking{syntax:"*";inherits:false}@keyframes spin{to{transform:rotate(360deg)}}
dist/assets/index-BSZlS5Yr.js ADDED
The diff for this file is too large to render. See raw diff
 
dist/assets/pyodide.worker-BeUH2O5o.js ADDED
@@ -0,0 +1,828 @@
+ (function(){"use strict";var i=`import numpy as np
+ from sympy import sympify, lambdify
+
+ from optimization_logic import *
+
+
+ class OptimizationManager:
+     def __init__(self):
+         self.function_values = {"x": [], "y": []}
+         self.trajectory_values = {"x": [], "y": []}
+         self.settings = {}
+
+     def handle_update_settings(self, new_settings) -> dict[str, dict] | None:
+         if new_settings == self.settings:
+             return None
+
+         function = new_settings.get("functionExpr", "").strip()
+         mode = new_settings.get("mode", "").lower().strip()
+         xlim = new_settings.get("xlim", [])
+         ylim = new_settings.get("ylim", [])
+
+         # Compare against the previous settings before overwriting them
+         function_changed = self._function_changed(function, mode)
+         self.settings = new_settings
+
+         if not self._is_valid_function(function, mode, xlim, ylim):
+             return {
+                 "trajectoryValues": {"x": [], "y": []},
+                 "functionValues": {"x": [], "y": []},
+             }
+
+         self._reset_trajectory()  # Must reset trajectory on any settings change that is valid
+
+         if not function_changed:
+             return {
+                 "trajectoryValues": self.trajectory_values,
+             }
+
+         try:
+             self._compute_function_values(function, mode, xlim, ylim)
+         except Exception:
+             self.function_values = {"x": [], "y": []}
+             self.trajectory_values = {"x": [], "y": []}
+
+         return {
+             "functionValues": self.function_values,
+             "trajectoryValues": self.trajectory_values,
+         }
+
+     def handle_reset(self) -> dict[str, dict]:
+         self._reset_trajectory()
+         return {
+             "trajectoryValues": self.trajectory_values,
+         }
+
+     def handle_next_step(self) -> dict[str, dict]:
+         current_steps = len(self.trajectory_values["x"])
+         self._compute_trajectory_values(self.settings, current_steps + 1)
+         return {
+             "trajectoryValues": self.trajectory_values,
+         }
+
+     def handle_prev_step(self) -> dict[str, dict]:
+         current_steps = len(self.trajectory_values["x"])
+         if current_steps > 1:
+             self._compute_trajectory_values(self.settings, current_steps - 1)
+         return {
+             "trajectoryValues": self.trajectory_values,
+         }
+
+     def handle_play(self) -> None:
+         pass  # not implemented yet
+
+     def handle_pause(self) -> None:
+         pass  # not implemented yet
+
+     def _is_valid_function(
+         self, function: str, mode: str, xlim: list, ylim: list
+     ) -> bool:
+         # axis limit checks
+         if len(xlim) != 2 or len(ylim) != 2:
+             return False
+         if xlim[0] >= xlim[1] or ylim[0] >= ylim[1]:
+             return False
+
+         # function expression check
+         try:
+             expr = sympify(function)
+             # use a frozenset so the membership tests below can hash it
+             symbols = frozenset(s.name for s in expr.free_symbols)
+             if mode == "univariate":
+                 return symbols in {frozenset({'x'}), frozenset()}
+             elif mode == "bivariate":
+                 return symbols in {
+                     frozenset({'x', 'y'}),
+                     frozenset({'x'}),
+                     frozenset({'y'}),
+                     frozenset(),
+                 }
+             else:
+                 return False
+
+         except Exception:
+             pass
+
+         return False
+
+     def _function_changed(self, function: str, mode: str) -> bool:
+         function = function.strip()
+         previous_function = self.settings.get("functionExpr", "").strip()
+         previous_mode = self.settings.get("mode", "")
+         return function != previous_function or mode != previous_mode
+
+     def _reset_trajectory(self) -> None:
+         try:
+             self._compute_trajectory_values(self.settings, steps=1)
+         except Exception:
+             self.trajectory_values = {"x": [], "y": []}
+
+     def _compute_function_values(self, function: str, mode: str, xlim: list, ylim: list) -> None:
+         expr = sympify(function)
+         if mode == "univariate":
+             x = np.linspace(xlim[0], xlim[1], 100)
+             f = lambdify('x', expr, modules=['numpy'])
+             y = f(x)
+
+             if not isinstance(y, np.ndarray):
+                 y = np.full_like(x, y)
+
+             self.function_values = {
+                 "x": x.tolist(),
+                 "y": y.tolist(),
+             }
+
+         elif mode == "bivariate":
+             x = np.linspace(xlim[0], xlim[1], 100)
+             y = np.linspace(ylim[0], ylim[1], 100)
+             X, Y = np.meshgrid(x, y)
+             f = lambdify(('x', 'y'), expr, modules=['numpy'])
+             Z = f(X, Y)
+
+             if not isinstance(Z, np.ndarray):
+                 Z = np.full_like(X, Z)
+
+             self.function_values = {
+                 "x": x.tolist(),
+                 "y": y.tolist(),
+                 "z": Z.tolist(),
+             }
+
+         else:
+             raise ValueError("Unsupported mode")
+
+     def _compute_trajectory_values(self, settings: dict, steps: int) -> None:
+         mode = settings.get("mode", "").lower().strip()
+         algorithm = settings.get("algorithm", "").lower().strip().replace(" ", "_")
+         function = sympify(settings.get("functionExpr", "").strip())
+
+         if mode == "univariate":
+             if algorithm == "gradient_descent":
+                 self.trajectory_values = gd_univariate(
+                     function,
+                     float(settings["x0"]),
+                     float(settings["learningRate"]),
+                     float(settings["momentum"]),
+                     steps,
+                 )
+             elif algorithm == "nesterov":
+                 self.trajectory_values = nesterov_univariate(
+                     function,
+                     float(settings["x0"]),
+                     float(settings["learningRate"]),
+                     float(settings["momentum"]),
+                     steps,
+                 )
+             elif algorithm == "adam":
+                 self.trajectory_values = adam_univariate(
+                     function,
+                     float(settings["x0"]),
+                     float(settings["learningRate"]),
+                     float(settings["beta1"]),
+                     float(settings["beta2"]),
+                     float(settings["epsilon"]),
+                     steps,
+                 )
+             elif algorithm == "adagrad":
+                 self.trajectory_values = adagrad_univariate(
+                     function,
+                     float(settings["x0"]),
+                     float(settings["learningRate"]),
+                     float(settings["epsilon"]),
+                     steps,
+                 )
+             elif algorithm == "rmsprop":
+                 self.trajectory_values = rmsprop_univariate(
+                     function,
+                     float(settings["x0"]),
+                     float(settings["learningRate"]),
+                     float(settings["beta"]),
+                     float(settings["epsilon"]),
+                     steps,
+                 )
+             elif algorithm == "adadelta":
+                 self.trajectory_values = adadelta_univariate(
+                     function,
+                     float(settings["x0"]),
+                     float(settings["beta"]),
+                     float(settings["epsilon"]),
+                     steps,
+                 )
+             elif algorithm == "newton":
+                 self.trajectory_values = newton_univariate(
+                     function,
+                     float(settings["x0"]),
+                     steps,
+                 )
+             else:
+                 raise ValueError("Unsupported algorithm for univariate mode")
+
+         elif mode == "bivariate":
+             if algorithm == "gradient_descent":
+                 self.trajectory_values = gd_bivariate(
+                     function,
+                     float(settings["x0"]),
+                     float(settings["y0"]),
+                     float(settings["learningRate"]),
+                     float(settings["momentum"]),
+                     steps,
+                 )
+             elif algorithm == "nesterov":
+                 self.trajectory_values = nesterov_bivariate(
+                     function,
+                     float(settings["x0"]),
+                     float(settings["y0"]),
+                     float(settings["learningRate"]),
+                     float(settings["momentum"]),
+                     steps,
+                 )
+             elif algorithm == "adam":
+                 self.trajectory_values = adam_bivariate(
+                     function,
+                     float(settings["x0"]),
+                     float(settings["y0"]),
+                     float(settings["learningRate"]),
+                     float(settings["beta1"]),
+                     float(settings["beta2"]),
+                     float(settings["epsilon"]),
+                     steps,
+                 )
+             elif algorithm == "adagrad":
+                 self.trajectory_values = adagrad_bivariate(
+                     function,
+                     float(settings["x0"]),
+                     float(settings["y0"]),
+                     float(settings["learningRate"]),
+                     float(settings["epsilon"]),
+                     steps,
+                 )
+             elif algorithm == "rmsprop":
+                 self.trajectory_values = rmsprop_bivariate(
+                     function,
+                     float(settings["x0"]),
+                     float(settings["y0"]),
+                     float(settings["learningRate"]),
+                     float(settings["beta"]),
+                     float(settings["epsilon"]),
+                     steps,
+                 )
+             elif algorithm == "adadelta":
+                 self.trajectory_values = adadelta_bivariate(
+                     function,
+                     float(settings["x0"]),
+                     float(settings["y0"]),
+                     float(settings["beta"]),
+                     float(settings["epsilon"]),
+                     steps,
+                 )
+             elif algorithm == "newton":
+                 self.trajectory_values = newton_bivariate(
+                     function,
+                     float(settings["x0"]),
+                     float(settings["y0"]),
+                     steps,
+                 )
+             else:
+                 raise ValueError("Unsupported algorithm for bivariate mode")
+
+ `,l=`import numpy as np
+ from sympy import lambdify, Expr
+
+
+ def gd_univariate(
+     function: Expr,
+     x0: float,
+     learning_rate: float,
+     momentum: float,
+     steps: int,
+ ) -> dict:
+     """
+     Perform gradient descent on a univariate function.
+
+     Assumes function is valid and in terms of x
+     """
+     f = lambdify('x', function, modules=['numpy'])
+     f_prime = lambdify('x', function.diff('x'), modules=['numpy'])
+
+     x_values = [x0]
+     y_values = [f(x0)]
+
+     x = x0
+     for i in range(steps - 1):
+         if i == 0:
+             m = 0
+         else:
+             m = momentum * (x_values[-1] - x_values[-2])
+
+         x = x - learning_rate * f_prime(x) + m
+         x_values.append(x)
+         y_values.append(f(x))
+
+     return {
+         "x": x_values,
+         "y": y_values,
+     }
+
+
+ def gd_bivariate(
+     function: Expr,
+     x0: float,
+     y0: float,
+     learning_rate: float,
+     momentum: float,
+     steps: int,
+ ) -> dict:
+     f = lambdify(('x', 'y'), function, modules=['numpy'])
+     fx = lambdify(('x', 'y'), function.diff('x'), modules=['numpy'])
+     fy = lambdify(('x', 'y'), function.diff('y'), modules=['numpy'])
+
+     x_values = [x0]
+     y_values = [y0]
+     z_values = [f(x0, y0)]
+
+     x = x0
+     y = y0
+     for i in range(steps - 1):
+         if i == 0:
+             mx = 0
+             my = 0
+         else:
+             mx = momentum * (x_values[-1] - x_values[-2])
+             my = momentum * (y_values[-1] - y_values[-2])
+
+         # evaluate both partials at the current point before updating
+         gx = fx(x, y)
+         gy = fy(x, y)
+         x = x - learning_rate * gx + mx
+         y = y - learning_rate * gy + my
+         x_values.append(x)
+         y_values.append(y)
+         z_values.append(f(x, y))
+
+     return {
+         "x": x_values,
+         "y": y_values,
+         "z": z_values,
+     }
+
+
+ def nesterov_univariate(
+     function: Expr,
+     x0: float,
+     learning_rate: float,
+     momentum: float,
+     steps: int,
+ ) -> dict:
+     f = lambdify('x', function, modules=['numpy'])
+     f_prime = lambdify('x', function.diff('x'), modules=['numpy'])
+
+     x_values = [x0]
+     y_values = [f(x0)]
+
+     x = x0
+     for i in range(steps - 1):
+         if i == 0:
+             m = 0
+         else:
+             m = momentum * (x_values[-1] - x_values[-2])
+
+         # look ahead along the momentum direction
+         x_lookahead = x + m
+         x = x_lookahead - learning_rate * f_prime(x_lookahead)
+
+         x_values.append(x)
+         y_values.append(f(x))
+
+     return {
+         "x": x_values,
+         "y": y_values,
+     }
+
+
+ def nesterov_bivariate(
+     function: Expr,
+     x0: float,
+     y0: float,
+     learning_rate: float,
+     momentum: float,
+     steps: int,
+ ) -> dict:
+     f = lambdify(('x', 'y'), function, modules=['numpy'])
+     fx = lambdify(('x', 'y'), function.diff('x'), modules=['numpy'])
+     fy = lambdify(('x', 'y'), function.diff('y'), modules=['numpy'])
+
+     x_values = [x0]
+     y_values = [y0]
+     z_values = [f(x0, y0)]
+
+     x = x0
+     y = y0
+     for i in range(steps - 1):
+         if i == 0:
+             mx = 0
+             my = 0
+         else:
+             mx = momentum * (x_values[-1] - x_values[-2])
+             my = momentum * (y_values[-1] - y_values[-2])
+
+         # look ahead along the momentum direction
+         x_lookahead = x + mx
+         y_lookahead = y + my
+
+         x = x_lookahead - learning_rate * fx(x_lookahead, y_lookahead)
+         y = y_lookahead - learning_rate * fy(x_lookahead, y_lookahead)
+
+         x_values.append(x)
+         y_values.append(y)
+         z_values.append(f(x, y))
+
+     return {
+         "x": x_values,
+         "y": y_values,
+         "z": z_values,
+     }
+
+
+ def newton_univariate(
+     function: Expr,
+     x0: float,
+     steps: int,
+ ) -> dict:
+     f = lambdify('x', function, modules=['numpy'])
+     f_prime = lambdify('x', function.diff('x'), modules=['numpy'])
+     f_prime_prime = lambdify('x', function.diff('x', 2), modules=['numpy'])
+
+     x_values = [x0]
+     y_values = [f(x0)]
+
+     x = x0
+     for i in range(steps - 1):
+         x = x - f_prime(x) / f_prime_prime(x)
+         x_values.append(x)
+         y_values.append(f(x))
+
+     return {
+         "x": x_values,
+         "y": y_values,
+     }
+
+
+ def newton_bivariate(
+     function: Expr,
+     x0: float,
+     y0: float,
+     steps: int,
+ ) -> dict:
+     f = lambdify(('x', 'y'), function, modules=['numpy'])
+     fx = lambdify(('x', 'y'), function.diff('x'), modules=['numpy'])
+     fy = lambdify(('x', 'y'), function.diff('y'), modules=['numpy'])
+     fxx = lambdify(('x', 'y'), function.diff('x', 2), modules=['numpy'])
+     fyy = lambdify(('x', 'y'), function.diff('y', 2), modules=['numpy'])
+     fxy = lambdify(('x', 'y'), function.diff('x', 'y'), modules=['numpy'])
+
+     x_values = [x0]
+     y_values = [y0]
+     z_values = [f(x0, y0)]
+
+     x = x0
+     y = y0
+     for i in range(steps - 1):
+         hessian = np.array(
+             [
+                 [fxx(x, y), fxy(x, y)],
+                 [fxy(x, y), fyy(x, y)],
+             ],
+         )
+         grad = np.array([fx(x, y), fy(x, y)])
+
+         try:
+             # delta = hessian^-1 * grad
+             delta = np.linalg.solve(hessian, grad)
+         except np.linalg.LinAlgError:
+             # singular hessian - cannot proceed
+             break
+
+         x = x - delta[0]
+         y = y - delta[1]
+
+         x_values.append(x)
+         y_values.append(y)
+         z_values.append(f(x, y))
+
+     return {
+         "x": x_values,
+         "y": y_values,
+         "z": z_values,
+     }
+
+
+ def adagrad_univariate(
+     function: Expr,
+     x0: float,
+     learning_rate: float,
+     epsilon: float,
+     steps: int,
+ ) -> dict:
+     f = lambdify('x', function, modules=['numpy'])
+     f_prime = lambdify('x', function.diff('x'), modules=['numpy'])
+
+     x_values = [x0]
+     y_values = [f(x0)]
+
+     x = x0
+     v = 0  # accumulated squared gradients
+     for i in range(steps - 1):
+         g = f_prime(x)
+         v += g ** 2
+         x = x - (learning_rate / (np.sqrt(v) + epsilon)) * g
+
+         x_values.append(x)
+         y_values.append(f(x))
+
+     return {
+         "x": x_values,
+         "y": y_values,
+     }
+
+
+ def adagrad_bivariate(
+     function: Expr,
+     x0: float,
+     y0: float,
+     learning_rate: float,
+     epsilon: float,
+     steps: int,
+ ) -> dict:
+     f = lambdify(('x', 'y'), function, modules=['numpy'])
+     fx = lambdify(('x', 'y'), function.diff('x'), modules=['numpy'])
+     fy = lambdify(('x', 'y'), function.diff('y'), modules=['numpy'])
+
+     x_values = [x0]
+     y_values = [y0]
+     z_values = [f(x0, y0)]
+
+     x = x0
+     y = y0
+     # accumulated squared gradients
+     vx = 0
+     vy = 0
+     for i in range(steps - 1):
+         gx = fx(x, y)
+         gy = fy(x, y)
+         vx += gx ** 2
+         vy += gy ** 2
+
+         x = x - (learning_rate / (np.sqrt(vx) + epsilon)) * gx
+         y = y - (learning_rate / (np.sqrt(vy) + epsilon)) * gy
+
+         x_values.append(x)
+         y_values.append(y)
+         z_values.append(f(x, y))
+
+     return {
+         "x": x_values,
+         "y": y_values,
+         "z": z_values,
+     }
+
+
+ def rmsprop_univariate(
+     function: Expr,
+     x0: float,
+     learning_rate: float,
+     beta: float,
+     epsilon: float,
+     steps: int,
+ ) -> dict:
+     f = lambdify('x', function, modules=['numpy'])
+     f_prime = lambdify('x', function.diff('x'), modules=['numpy'])
+
+     x_values = [x0]
+     y_values = [f(x0)]
+
+     x = x0
+     v = 0  # exponentially weighted average of squared gradients
+     for i in range(steps - 1):
+         g = f_prime(x)
+         v = beta * v + (1 - beta) * g ** 2
+         x = x - (learning_rate / (np.sqrt(v) + epsilon)) * g
+
+         x_values.append(x)
+         y_values.append(f(x))
+
+     return {
+         "x": x_values,
+         "y": y_values,
+     }
+
+
+ def rmsprop_bivariate(
+     function: Expr,
+     x0: float,
+     y0: float,
+     learning_rate: float,
+     beta: float,
+     epsilon: float,
+     steps: int,
+ ) -> dict:
+     f = lambdify(('x', 'y'), function, modules=['numpy'])
+     fx = lambdify(('x', 'y'), function.diff('x'), modules=['numpy'])
+     fy = lambdify(('x', 'y'), function.diff('y'), modules=['numpy'])
+
+     x_values = [x0]
+     y_values = [y0]
+     z_values = [f(x0, y0)]
+
+     x = x0
+     y = y0
+     # exponentially weighted average of squared gradients
+     vx = 0
+     vy = 0
+     for i in range(steps - 1):
+         gx = fx(x, y)
+         gy = fy(x, y)
+         vx = beta * vx + (1 - beta) * gx ** 2
+         vy = beta * vy + (1 - beta) * gy ** 2
+
+         x = x - (learning_rate / (np.sqrt(vx) + epsilon)) * gx
+         y = y - (learning_rate / (np.sqrt(vy) + epsilon)) * gy
+
+         x_values.append(x)
+         y_values.append(y)
+         z_values.append(f(x, y))
+
+     return {
+         "x": x_values,
+         "y": y_values,
+         "z": z_values,
+     }
+
+
+ def adadelta_univariate(
+     function: Expr,
+     x0: float,
+     beta: float,
+     epsilon: float,
+     steps: int,
+ ) -> dict:
+     f = lambdify('x', function, modules=['numpy'])
+     f_prime = lambdify('x', function.diff('x'), modules=['numpy'])
+
+     x_values = [x0]
+     y_values = [f(x0)]
+
+     x = x0
+     v = 0  # exponentially weighted average of squared gradients
+     s = 0  # exponentially weighted average of squared updates
+     for i in range(steps - 1):
+         g = f_prime(x)
+         v = beta * v + (1 - beta) * g ** 2
+         delta_x = - (np.sqrt(s + epsilon) / np.sqrt(v + epsilon)) * g
+         x = x + delta_x
+
+         s = beta * s + (1 - beta) * delta_x ** 2
+
+         x_values.append(x)
+         y_values.append(f(x))
+
+     return {
+         "x": x_values,
+         "y": y_values,
+     }
+
+
+ def adadelta_bivariate(
+     function: Expr,
+     x0: float,
+     y0: float,
+     beta: float,
+     epsilon: float,
+     steps: int,
+ ) -> dict:
+     f = lambdify(('x', 'y'), function, modules=['numpy'])
+     fx = lambdify(('x', 'y'), function.diff('x'), modules=['numpy'])
+     fy = lambdify(('x', 'y'), function.diff('y'), modules=['numpy'])
+
+     x_values = [x0]
+     y_values = [y0]
+     z_values = [f(x0, y0)]
+
+     x = x0
+     y = y0
+     # exponentially weighted average of squared gradients
+     vx = 0
+     vy = 0
+     # exponentially weighted average of squared updates
+     sx = 0
+     sy = 0
+     for i in range(steps - 1):
+         gx = fx(x, y)
+         gy = fy(x, y)
+         vx = beta * vx + (1 - beta) * gx ** 2
+         vy = beta * vy + (1 - beta) * gy ** 2
+
+         delta_x = - (np.sqrt(sx + epsilon) / np.sqrt(vx + epsilon)) * gx
+         delta_y = - (np.sqrt(sy + epsilon) / np.sqrt(vy + epsilon)) * gy
+
+         x = x + delta_x
+         y = y + delta_y
+
+         sx = beta * sx + (1 - beta) * delta_x ** 2
+         sy = beta * sy + (1 - beta) * delta_y ** 2
+
+         x_values.append(x)
+         y_values.append(y)
+         z_values.append(f(x, y))
+
+     return {
+         "x": x_values,
+         "y": y_values,
+         "z": z_values,
+     }
+
+
+ def adam_univariate(
+     function: Expr,
+     x0: float,
+     learning_rate: float,
+     beta1: float,
+     beta2: float,
+     epsilon: float,
+     steps: int,
+ ) -> dict:
+     f = lambdify('x', function, modules=['numpy'])
+     f_prime = lambdify('x', function.diff('x'), modules=['numpy'])
+
+     x_values = [x0]
+     y_values = [f(x0)]
+
+     x = x0
+     m = 0  # first moment
+     v = 0  # second moment
+     for i in range(steps - 1):
+         g = f_prime(x)
+         m = beta1 * m + (1 - beta1) * g
+         v = beta2 * v + (1 - beta2) * g ** 2
+
+         m_hat = m / (1 - beta1 ** (i + 1))
+         v_hat = v / (1 - beta2 ** (i + 1))
+
+         x = x - (learning_rate / (np.sqrt(v_hat) + epsilon)) * m_hat
+
+         x_values.append(x)
+         y_values.append(f(x))
+
+     return {
+         "x": x_values,
+         "y": y_values,
+     }
+
+
+ def adam_bivariate(
+     function: Expr,
+     x0: float,
+     y0: float,
+     learning_rate: float,
+     beta1: float,
+     beta2: float,
+     epsilon: float,
+     steps: int,
+ ) -> dict:
+     f = lambdify(('x', 'y'), function, modules=['numpy'])
+     fx = lambdify(('x', 'y'), function.diff('x'), modules=['numpy'])
+     fy = lambdify(('x', 'y'), function.diff('y'), modules=['numpy'])
+
+     x_values = [x0]
+     y_values = [y0]
+     z_values = [f(x0, y0)]
+
+     x = x0
+     y = y0
+     # first moments
+     mx = 0
+     my = 0
+     # second moments
+     vx = 0
+     vy = 0
+     for i in range(steps - 1):
+         gx = fx(x, y)
+         gy = fy(x, y)
+
+         mx = beta1 * mx + (1 - beta1) * gx
+         my = beta1 * my + (1 - beta1) * gy
+
+         vx = beta2 * vx + (1 - beta2) * gx ** 2
+         vy = beta2 * vy + (1 - beta2) * gy ** 2
+
+         mx_hat = mx / (1 - beta1 ** (i + 1))
+         my_hat = my / (1 - beta1 ** (i + 1))
+
+         vx_hat = vx / (1 - beta2 ** (i + 1))
+         vy_hat = vy / (1 - beta2 ** (i + 1))
+
+         x = x - (learning_rate / (np.sqrt(vx_hat) + epsilon)) * mx_hat
+         y = y - (learning_rate / (np.sqrt(vy_hat) + epsilon)) * my_hat
+
+         x_values.append(x)
+         y_values.append(y)
+         z_values.append(f(x, y))
+
+     return {
+         "x": x_values,
+         "y": y_values,
+         "z": z_values,
+     }`;const o="https://cdn.jsdelivr.net/pyodide/v0.26.1/full/pyodide.mjs";let e=null,t=null;async function f(){const{loadPyodide:n}=await import(o);e=await n({indexURL:"https://cdn.jsdelivr.net/pyodide/v0.26.1/full/"}),await e.loadPackage(["numpy","sympy"]),e.FS.writeFile("optimization_logic.py",l),e.FS.writeFile("optimization_manager.py",i),e.runPython("from optimization_manager import OptimizationManager; manager = OptimizationManager();"),t=e.globals.get("manager"),t||console.error("Failed to initialize optimization manager"),self.postMessage({type:"READY"})}function s(n){if(!n)return null;try{const a=n.toJs({dict_converter:Object.fromEntries});n.destroy&&n.destroy(),self.postMessage({type:"RESULT",data:a})}catch(a){console.error("Error handling Python result:",a)}}self.onmessage=async n=>{const a=n.data;if(!t){console.warn("Pyodide is not ready yet");return}switch(a.type){case"INIT":const r=e.toPy(a.settings);s(t.handle_update_settings(r));break;case"NEXT_STEP":s(t.handle_next_step());break;case"PREV_STEP":s(t.handle_prev_step());break;case"RESET":s(t.handle_reset());break;default:console.error("Unknown message type:",a);break}},f()})();
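For context on how the app drives this worker: the handler at the end of the bundle accepts INIT (with a settings payload), NEXT_STEP, PREV_STEP, and RESET messages, and posts back READY once Pyodide has loaded and RESULT with the computed values. A minimal main-thread client could look like the sketch below; the wrapper class and the worker URL are illustrative, only the message names come from the worker itself:

```typescript
// Hypothetical main-thread wrapper for the Pyodide worker above. The message
// names (INIT, NEXT_STEP, PREV_STEP, RESET, READY, RESULT) come straight from
// the worker's onmessage handler; everything else is an assumption.
type WorkerResponse =
  | { type: "READY" }
  | { type: "RESULT"; data: { functionValues?: unknown; trajectoryValues?: unknown } };

class OptimizationClient {
  private worker: Worker;

  constructor(onMessage: (msg: WorkerResponse) => void) {
    // In the built app the worker ships as assets/pyodide.worker-*.js;
    // this URL assumes a source-tree setup.
    this.worker = new Worker(new URL("./pyodide.worker.js", import.meta.url), {
      type: "module",
    });
    this.worker.onmessage = (e: MessageEvent<WorkerResponse>) => onMessage(e.data);
  }

  /** Push new plot/optimizer settings; the worker answers with a RESULT message. */
  updateSettings(settings: Record<string, unknown>): void {
    this.worker.postMessage({ type: "INIT", settings });
  }

  nextStep(): void {
    this.worker.postMessage({ type: "NEXT_STEP" });
  }

  prevStep(): void {
    this.worker.postMessage({ type: "PREV_STEP" });
  }

  reset(): void {
    this.worker.postMessage({ type: "RESET" });
  }
}
```

For reference, the bias-corrected update that the embedded `adam_univariate` computes at step i (with gradient g_i = f'(x_i), learning rate η, and m = v = 0 initially) is:

```latex
m_{i+1} = \beta_1 m_i + (1 - \beta_1)\, g_i, \qquad
v_{i+1} = \beta_2 v_i + (1 - \beta_2)\, g_i^2,
```
```latex
\hat{m} = \frac{m_{i+1}}{1 - \beta_1^{\,i+1}}, \qquad
\hat{v} = \frac{v_{i+1}}{1 - \beta_2^{\,i+1}}, \qquad
x_{i+1} = x_i - \frac{\eta}{\sqrt{\hat{v}} + \epsilon}\, \hat{m}
```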
dist/index.html ADDED
@@ -0,0 +1,14 @@
+ <!doctype html>
+ <html lang="en">
+   <head>
+     <meta charset="UTF-8" />
+     <link rel="icon" type="image/svg+xml" href="/vite.svg" />
+     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
+     <title>Optimization</title>
+     <script type="module" crossorigin src="/assets/index-BSZlS5Yr.js"></script>
+     <link rel="stylesheet" crossorigin href="/assets/index-BE9C_h4C.css">
+   </head>
+   <body>
+     <div id="root"></div>
+   </body>
+ </html>
dist/vite.svg ADDED
frontends/react/vite.config.ts CHANGED
@@ -4,16 +4,12 @@ import tailwindcss from '@tailwindcss/vite'
 
  // https://vite.dev/config/
  export default defineConfig({
-   base: './',
    plugins: [
      react(),
      tailwindcss(),
    ],
-   assetsInclude: ['**/*.py'],
-   server: {
-     fs: {
-       // allow importing from ../../
-       allow: ['..'],
-     }
+   build: {
+     outDir: "../../dist",
+     emptyOutDir: true,
    }
  })
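The design here: with the static SDK, Vite now emits the production build straight into the repo-level dist/ that app_file points at, and emptyOutDir clears stale assets on each build. Dropping base: './' leaves Vite's default '/' base, which matches the absolute /assets/... paths in the committed dist/index.html, and the assetsInclude/fs.allow workarounds are no longer needed because the Python sources are inlined into the worker bundle. Reassembled from the hunk above, the post-change config reads roughly as follows (a sketch: the defineConfig and react import lines are assumed from the calls; only the tailwindcss import is visible in the hunk header):

```typescript
import { defineConfig } from 'vite'
import react from '@vitejs/plugin-react'
import tailwindcss from '@tailwindcss/vite'

// https://vite.dev/config/
export default defineConfig({
  plugins: [
    react(),
    tailwindcss(),
  ],
  // Emit the production build into the repo-level dist/, which the
  // Space's app_file (dist/index.html) points at
  build: {
    outDir: "../../dist",
    emptyOutDir: true,
  }
})
```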