Spaces:
Running
Running
Commit
·
a1e142a
1
Parent(s):
395823a
Parametrize probability weights
Browse files
README.md
CHANGED
|
@@ -86,21 +86,9 @@ Then just specify the operator names in your call, as above.
|
|
| 86 |
You can also change the dataset learned on by passing in `X` and `y` as
|
| 87 |
numpy arrays to `eureqa(...)`.
|
| 88 |
|
| 89 |
-
One can also adjust the relative probabilities of each operation
|
| 90 |
-
|
| 91 |
-
|
| 92 |
-
weights = [8, 1, 1, 1, 0.1, 0.5, 2]
|
| 93 |
-
```
|
| 94 |
-
for:
|
| 95 |
-
|
| 96 |
-
1. Perturb constant
|
| 97 |
-
2. Mutate operator
|
| 98 |
-
3. Append a node
|
| 99 |
-
4. Delete a subtree
|
| 100 |
-
5. Simplify equation
|
| 101 |
-
6. Randomize completely
|
| 102 |
-
7. Do nothing
|
| 103 |
-
|
| 104 |
|
| 105 |
# TODO
|
| 106 |
|
|
|
|
| 86 |
You can also change the dataset learned on by passing in `X` and `y` as
|
| 87 |
numpy arrays to `eureqa(...)`.
|
| 88 |
|
| 89 |
+
One can also adjust the relative probabilities of each mutation operation
|
| 90 |
+
with the `weight...` parameters to `eureqa(...)`.
|
| 91 |
+
These weights are used inside `eureqa.jl`.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 92 |
|
| 93 |
# TODO
|
| 94 |
|
eureqa.jl
CHANGED
|
@@ -15,7 +15,7 @@ function debug(verbosity, string...)
|
|
| 15 |
verbosity > 0 ? println(string...) : nothing
|
| 16 |
end
|
| 17 |
|
| 18 |
-
function
|
| 19 |
return round(Int32, 1e3*(time()-1.6e9))
|
| 20 |
end
|
| 21 |
|
|
@@ -326,17 +326,17 @@ end
|
|
| 326 |
# exp(-delta/T) defines probability of accepting a change
|
| 327 |
function iterate(
|
| 328 |
tree::Node, T::Float32,
|
| 329 |
-
alpha::Float32=1.0f0;
|
| 330 |
annealing::Bool=true
|
| 331 |
)::Node
|
| 332 |
prev = tree
|
| 333 |
tree = deepcopy(tree)
|
| 334 |
|
| 335 |
mutationChoice = rand()
|
| 336 |
-
|
| 337 |
-
|
| 338 |
-
|
| 339 |
-
|
|
|
|
| 340 |
n = countNodes(tree)
|
| 341 |
|
| 342 |
if mutationChoice < cweights[1]
|
|
@@ -386,8 +386,8 @@ mutable struct PopMember
|
|
| 386 |
score::Float32
|
| 387 |
birth::Int32
|
| 388 |
|
| 389 |
-
PopMember(t::Node) = new(t, scoreFunc(t),
|
| 390 |
-
PopMember(t::Node, score::Float32) = new(t, score,
|
| 391 |
|
| 392 |
end
|
| 393 |
|
|
@@ -429,10 +429,10 @@ function iterateSample(
|
|
| 429 |
allstar = bestOfSample(pop)
|
| 430 |
new = iterate(
|
| 431 |
allstar.tree, T,
|
| 432 |
-
|
| 433 |
allstar.tree = new
|
| 434 |
allstar.score = scoreFunc(new)
|
| 435 |
-
allstar.birth =
|
| 436 |
return allstar
|
| 437 |
end
|
| 438 |
|
|
@@ -530,7 +530,7 @@ function optimizeConstants(member::PopMember)::PopMember
|
|
| 530 |
if Optim.converged(result)
|
| 531 |
setConstants(member.tree, result.minimizer)
|
| 532 |
member.score = convert(Float32, result.minimum)
|
| 533 |
-
member.birth =
|
| 534 |
else
|
| 535 |
setConstants(member.tree, x0)
|
| 536 |
end
|
|
|
|
| 15 |
verbosity > 0 ? println(string...) : nothing
|
| 16 |
end
|
| 17 |
|
| 18 |
+
function getTime()::Int32
|
| 19 |
return round(Int32, 1e3*(time()-1.6e9))
|
| 20 |
end
|
| 21 |
|
|
|
|
| 326 |
# exp(-delta/T) defines probability of accepting a change
|
| 327 |
function iterate(
|
| 328 |
tree::Node, T::Float32,
|
|
|
|
| 329 |
annealing::Bool=true
|
| 330 |
)::Node
|
| 331 |
prev = tree
|
| 332 |
tree = deepcopy(tree)
|
| 333 |
|
| 334 |
mutationChoice = rand()
|
| 335 |
+
weightAdjustmentMutateConstant = min(8, countConstants(tree))/8.0
|
| 336 |
+
cur_weights = deepcopy(mutationWeights) .* 1.0
|
| 337 |
+
cur_weights[1] *= weightAdjustmentMutateConstant
|
| 338 |
+
cur_weights /= sum(cur_weights)
|
| 339 |
+
cweights = cumsum(cur_weights)
|
| 340 |
n = countNodes(tree)
|
| 341 |
|
| 342 |
if mutationChoice < cweights[1]
|
|
|
|
| 386 |
score::Float32
|
| 387 |
birth::Int32
|
| 388 |
|
| 389 |
+
PopMember(t::Node) = new(t, scoreFunc(t), getTime())
|
| 390 |
+
PopMember(t::Node, score::Float32) = new(t, score, getTime())
|
| 391 |
|
| 392 |
end
|
| 393 |
|
|
|
|
| 429 |
allstar = bestOfSample(pop)
|
| 430 |
new = iterate(
|
| 431 |
allstar.tree, T,
|
| 432 |
+
annealing=annealing)
|
| 433 |
allstar.tree = new
|
| 434 |
allstar.score = scoreFunc(new)
|
| 435 |
+
allstar.birth = getTime()
|
| 436 |
return allstar
|
| 437 |
end
|
| 438 |
|
|
|
|
| 530 |
if Optim.converged(result)
|
| 531 |
setConstants(member.tree, result.minimizer)
|
| 532 |
member.score = convert(Float32, result.minimum)
|
| 533 |
+
member.birth = getTime()
|
| 534 |
else
|
| 535 |
setConstants(member.tree, x0)
|
| 536 |
end
|
eureqa.py
CHANGED
|
@@ -7,15 +7,22 @@ import pandas as pd
|
|
| 7 |
|
| 8 |
|
| 9 |
def eureqa(X=None, y=None, threads=4, parsimony=1e-3, alpha=10,
|
| 10 |
-
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
|
| 17 |
-
|
| 18 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 19 |
):
|
| 20 |
""" Runs symbolic regression in Julia, to fit y given X.
|
| 21 |
Either provide a 2D numpy array for X, 1D array for y, or declare a test to run.
|
|
@@ -74,6 +81,12 @@ def eureqa(X=None, y=None, threads=4, parsimony=1e-3, alpha=10,
|
|
| 74 |
eval_str = "np.sign(X[:, 2])*np.abs(X[:, 2])**2.5 + 5*np.cos(X[:, 3]) - 5"
|
| 75 |
elif test == 'simple2':
|
| 76 |
eval_str = "np.sign(X[:, 2])*np.abs(X[:, 2])**3.5 + 1/np.abs(X[:, 0])"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 77 |
|
| 78 |
X = np.random.randn(100, 5)*3
|
| 79 |
y = eval(eval_str)
|
|
@@ -93,6 +106,15 @@ def eureqa(X=None, y=None, threads=4, parsimony=1e-3, alpha=10,
|
|
| 93 |
const shouldOptimizeConstants = {'true' if shouldOptimizeConstants else 'false'}
|
| 94 |
const hofFile = "{equation_file}"
|
| 95 |
const nthreads = {threads:d}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 96 |
"""
|
| 97 |
|
| 98 |
assert len(X.shape) == 2
|
|
|
|
| 7 |
|
| 8 |
|
| 9 |
def eureqa(X=None, y=None, threads=4, parsimony=1e-3, alpha=10,
|
| 10 |
+
maxsize=20, migration=True,
|
| 11 |
+
hofMigration=True, fractionReplacedHof=0.1,
|
| 12 |
+
shouldOptimizeConstants=True,
|
| 13 |
+
binary_operators=["plus", "mult"],
|
| 14 |
+
unary_operators=["cos", "exp", "sin"],
|
| 15 |
+
niterations=20, npop=100, annealing=True,
|
| 16 |
+
ncyclesperiteration=5000, fractionReplaced=0.1,
|
| 17 |
+
topn=10, equation_file='hall_of_fame.csv',
|
| 18 |
+
test='simple1',
|
| 19 |
+
weightMutateConstant=4.0,
|
| 20 |
+
weightMutateOperator=0.5,
|
| 21 |
+
weightAddNode=0.5,
|
| 22 |
+
weightDeleteNode=0.5,
|
| 23 |
+
weightSimplify=0.05,
|
| 24 |
+
weightRandomize=0.25,
|
| 25 |
+
weightDoNothing=1.0,
|
| 26 |
):
|
| 27 |
""" Runs symbolic regression in Julia, to fit y given X.
|
| 28 |
Either provide a 2D numpy array for X, 1D array for y, or declare a test to run.
|
|
|
|
| 81 |
eval_str = "np.sign(X[:, 2])*np.abs(X[:, 2])**2.5 + 5*np.cos(X[:, 3]) - 5"
|
| 82 |
elif test == 'simple2':
|
| 83 |
eval_str = "np.sign(X[:, 2])*np.abs(X[:, 2])**3.5 + 1/np.abs(X[:, 0])"
|
| 84 |
+
elif test == 'simple3':
|
| 85 |
+
eval_str = "np.exp(X[:, 0]/2) + 12.0 + np.log(np.abs(X[:, 0])*10 + 1)"
|
| 86 |
+
elif test == 'simple4':
|
| 87 |
+
eval_str = "1.0 + 3*X[:, 0]**2 - 0.5*X[:, 0]**3 + 0.1*X[:, 0]**4"
|
| 88 |
+
elif test == 'simple5':
|
| 89 |
+
eval_str = "(np.exp(X[:, 3]) + 3)/(X[:, 1] + np.cos(X[:, 0]))"
|
| 90 |
|
| 91 |
X = np.random.randn(100, 5)*3
|
| 92 |
y = eval(eval_str)
|
|
|
|
| 106 |
const shouldOptimizeConstants = {'true' if shouldOptimizeConstants else 'false'}
|
| 107 |
const hofFile = "{equation_file}"
|
| 108 |
const nthreads = {threads:d}
|
| 109 |
+
const mutationWeights = [
|
| 110 |
+
{weightMutateConstant:f},
|
| 111 |
+
{weightMutateOperator:f},
|
| 112 |
+
{weightAddNode:f},
|
| 113 |
+
{weightDeleteNode:f},
|
| 114 |
+
{weightSimplify:f},
|
| 115 |
+
{weightRandomize:f},
|
| 116 |
+
{weightDoNothing:f}
|
| 117 |
+
]
|
| 118 |
"""
|
| 119 |
|
| 120 |
assert len(X.shape) == 2
|
operators.jl
CHANGED
|
@@ -3,3 +3,4 @@ plus(x::Float32, y::Float32)::Float32 = x+y
|
|
| 3 |
mult(x::Float32, y::Float32)::Float32 = x*y;
|
| 4 |
pow(x::Float32, y::Float32)::Float32 = sign(x)*abs(x)^y;
|
| 5 |
div(x::Float32, y::Float32)::Float32 = x/y;
|
|
|
|
|
|
| 3 |
mult(x::Float32, y::Float32)::Float32 = x*y;
|
| 4 |
pow(x::Float32, y::Float32)::Float32 = sign(x)*abs(x)^y;
|
| 5 |
div(x::Float32, y::Float32)::Float32 = x/y;
|
| 6 |
+
log(x::Float32)::Float32 = log(abs(x) + 1f-9);
|