Quantum Tinkerer / adaptive (archived project; repository and other project resources are read-only)

Commit c589a2f6
authored 6 years ago by Joseph Weston, committed 6 years ago by Bas Nijholt

refactor adding all loss functions to tests involving a learner
parent ef227b84
merge request !135: "test all the different loss functions in each test"
1 changed file: adaptive/tests/test_learners.py (+45 additions, -24 deletions)
@@ -28,6 +28,26 @@ except ModuleNotFoundError:
     SKOptLearner = None
 
 
+LOSS_FUNCTIONS = {
+    Learner1D: ('loss_per_interval', (
+        adaptive.learner.learner1D.default_loss,
+        adaptive.learner.learner1D.uniform_loss,
+        adaptive.learner.learner1D.curvature_loss_function(),
+    )),
+    Learner2D: ('loss_per_triangle', (
+        adaptive.learner.learner2D.default_loss,
+        adaptive.learner.learner2D.uniform_loss,
+        adaptive.learner.learner2D.minimize_triangle_surface_loss,
+        adaptive.learner.learner2D.resolution_loss_function(),
+    )),
+    LearnerND: ('loss_per_simplex', (
+        adaptive.learner.learnerND.default_loss,
+        adaptive.learner.learnerND.std_loss,
+        adaptive.learner.learnerND.uniform_loss,
+    )),
+}
+
+
 def generate_random_parametrization(f):
     """Return a realization of 'f' with parameters bound to random values.
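Each entry of LOSS_FUNCTIONS pairs a learner class with the keyword argument that accepts a loss function and the loss functions worth testing. A minimal sketch of how one entry expands into learner instances (not part of the commit; it assumes the Learner1D constructor signature that the decorators below already rely on):

    # Sketch: build one Learner1D per loss function registered for it.
    loss_param, losses = LOSS_FUNCTIONS[Learner1D]   # ('loss_per_interval', (...))
    learners = [
        Learner1D(lambda x: x**2, bounds=(-1, 1), **{loss_param: loss})
        for loss in losses
    ]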
@@ -75,38 +95,26 @@ def maybe_skip(learner):
 
 # All parameters except the first must be annotated with a callable that
 # returns a random value for that parameter.
-@learn_with(Learner1D, bounds=(-1, 1), loss_per_interval=adaptive.learner.learner1D.default_loss)
-@learn_with(Learner1D, bounds=(-1, 1), loss_per_interval=adaptive.learner.learner1D.uniform_loss)
-@learn_with(Learner1D, bounds=(-1, 1), loss_per_interval=adaptive.learner.learner1D.curvature_loss_function())
+@learn_with(Learner1D, bounds=(-1, 1))
 def quadratic(x, m: uniform(0, 10), b: uniform(0, 1)):
     return m * x**2 + b
 
 
-@learn_with(Learner1D, bounds=(-1, 1), loss_per_interval=adaptive.learner.learner1D.default_loss)
-@learn_with(Learner1D, bounds=(-1, 1), loss_per_interval=adaptive.learner.learner1D.uniform_loss)
-@learn_with(Learner1D, bounds=(-1, 1), loss_per_interval=adaptive.learner.learner1D.curvature_loss_function())
+@learn_with(Learner1D, bounds=(-1, 1))
 def linear_with_peak(x, d: uniform(-1, 1)):
     a = 0.01
     return x + a**2 / (a**2 + (x - d)**2)
 
 
-@learn_with(LearnerND, bounds=((-1, 1), (-1, 1)), loss_per_simplex=adaptive.learner.learnerND.default_loss)
-@learn_with(LearnerND, bounds=((-1, 1), (-1, 1)), loss_per_simplex=adaptive.learner.learnerND.std_loss)
-@learn_with(LearnerND, bounds=((-1, 1), (-1, 1)), loss_per_simplex=adaptive.learner.learnerND.uniform_loss)
-@learn_with(Learner2D, bounds=((-1, 1), (-1, 1)), loss_per_triangle=adaptive.learner.learner2D.default_loss)
-@learn_with(Learner2D, bounds=((-1, 1), (-1, 1)), loss_per_triangle=adaptive.learner.learner2D.uniform_loss)
-@learn_with(Learner2D, bounds=((-1, 1), (-1, 1)), loss_per_triangle=adaptive.learner.learner2D.minimize_triangle_surface_loss)
-@learn_with(Learner2D, bounds=((-1, 1), (-1, 1)), loss_per_triangle=adaptive.learner.learner2D.resolution_loss_function())
+@learn_with(LearnerND, bounds=((-1, 1), (-1, 1)))
+@learn_with(Learner2D, bounds=((-1, 1), (-1, 1)))
 def ring_of_fire(xy, d: uniform(0.2, 1)):
     a = 0.2
     x, y = xy
     return x + math.exp(-(x**2 + y**2 - d**2)**2 / a**4)
 
 
-@learn_with(LearnerND, bounds=((-1, 1), (-1, 1), (-1, 1)), loss_per_simplex=adaptive.learner.learnerND.default_loss)
-@learn_with(LearnerND, bounds=((-1, 1), (-1, 1), (-1, 1)), loss_per_simplex=adaptive.learner.learnerND.std_loss)
-@learn_with(LearnerND, bounds=((-1, 1), (-1, 1), (-1, 1)), loss_per_simplex=adaptive.learner.learnerND.uniform_loss)
+@learn_with(LearnerND, bounds=((-1, 1), (-1, 1), (-1, 1)))
 def sphere_of_fire(xyz, d: uniform(0.2, 1)):
     a = 0.2
     x, y, z = xyz
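Note that the annotations on these test functions (for example m: uniform(0, 10)) are not types: per the comment above, each is a callable returning a random value, which generate_random_parametrization then binds. A self-contained sketch of that mechanism, an assumption since the real helper is defined earlier in the file and may differ:

    import functools
    import inspect
    import random

    def generate_random_parametrization_sketch(f):
        # Bind every parameter after the first to a value obtained by
        # calling its annotation, leaving a function of one argument.
        params = list(inspect.signature(f).parameters.values())[1:]
        realization = {p.name: p.annotation() for p in params}
        return functools.partial(f, **realization)

    # Hypothetical stand-in for the annotated test functions above.
    def quadratic_demo(x,
                       m: (lambda: random.uniform(0, 10)),
                       b: (lambda: random.uniform(0, 1))):
        return m * x**2 + b

    g = generate_random_parametrization_sketch(quadratic_demo)
    print(g(0.5))   # m and b are now fixed; only x remains free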
@@ -120,6 +128,17 @@ def gaussian(n):
 
 # Decorators for tests.
 
+
+# Create a sequence of learner parameters by adding all
+# possible loss functions to an existing parameter set.
+def add_loss_to_params(learner_type, existing_params):
+    if learner_type not in LOSS_FUNCTIONS:
+        return [existing_params]
+    loss_param, loss_functions = LOSS_FUNCTIONS[learner_type]
+    loss_params = [{loss_param: f} for f in loss_functions]
+    return [dict(**existing_params, **lp) for lp in loss_params]
+
+
 def run_with(*learner_types):
     pars = []
     for l in learner_types:
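Concretely, add_loss_to_params fans a single parameter set out into one copy per registered loss function, and leaves learner types without a LOSS_FUNCTIONS entry untouched. An illustrative session (function reprs abbreviated; not actual output):

    >>> add_loss_to_params(Learner1D, {'bounds': (-1, 1)})
    [{'bounds': (-1, 1), 'loss_per_interval': <default_loss>},
     {'bounds': (-1, 1), 'loss_per_interval': <uniform_loss>},
     {'bounds': (-1, 1), 'loss_per_interval': <curvature loss>}]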
@@ -127,13 +146,15 @@ def run_with(*learner_types):
         if has_marker:
             marker, l = l
         for f, k in learner_function_combos[l]:
-            # Check if learner was marked with our `xfail` decorator
-            # XXX: doesn't work when feeding kwargs to xfail.
-            if has_marker:
-                pars.append(pytest.param(l, f, dict(k), marks=[marker]))
-            else:
-                pars.append((l, f, dict(k)))
+            ks = add_loss_to_params(l, k)
+            for k in ks:
+                # Check if learner was marked with our `xfail` decorator
+                # XXX: doesn't work when feeding kwargs to xfail.
+                if has_marker:
+                    pars.append(pytest.param(l, f, dict(k), marks=[marker]))
+                else:
+                    pars.append((l, f, dict(k)))
     return pytest.mark.parametrize('learner_type, f, learner_kwargs', pars)
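The net effect is that every test decorated with run_with is parametrized over learner type, test function, and each applicable loss function, instead of relying on per-function stacks of @learn_with decorators. A sketch of how a test consumes this parametrization (hypothetical test name and goal; it assumes adaptive.runner.simple and the learners' npoints attribute, both part of adaptive's public API):

    from adaptive.runner import simple

    @run_with(Learner1D, Learner2D, LearnerND)
    def test_runs_with_every_loss(learner_type, f, learner_kwargs):
        # learner_kwargs already contains the loss function injected
        # by add_loss_to_params.
        f = generate_random_parametrization(f)
        learner = learner_type(f, **learner_kwargs)
        simple(learner, goal=lambda l: l.npoints > 10)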