Compare revisions
3bd66543cef41a64e62768efbc2ea45aa6aaee6c to f846e1fb7ba9362ae58302063d68754831b9f8ec
Changes are shown as if the source revision was being merged into the target revision.
Source: qt/adaptive @ f846e1fb7ba9362ae58302063d68754831b9f8ec
Target: qt/adaptive @ 3bd66543cef41a64e62768efbc2ea45aa6aaee6c
Commits on Source (4)
2D: rename 'learner._interp' to 'learner.pending_points' as in other learners · b034e437 (Bas Nijholt, 6 years ago)
remove test from 'test_learners.py' that is already in 'test_learnernd.py' · f24018d1 (Bas Nijholt, 6 years ago)
move 'SKOptLearner' test to 'test_skopt_learner.py' · 896b7b4e (Bas Nijholt, 6 years ago)
Add a test for the 'BalancingLearner' with various learners, closes #102 · f846e1fb (Bas Nijholt, 6 years ago)
Showing 3 changed files with 77 additions and 52 deletions:
adaptive/learner/learner2D.py: 13 additions, 10 deletions
adaptive/tests/test_learners.py: 32 additions, 42 deletions
adaptive/tests/test_skopt_learner.py: 32 additions, 0 deletions
adaptive/learner/learner2D.py (view file @ f846e1fb)
@@ -166,6 +166,9 @@ class Learner2D(BaseLearner):
     ----------
     data : dict
         Sampled points and values.
+    pending_points : set
+        Points that still have to be evaluated and are currently
+        interpolated, see `data_combined`.
     stack_size : int, default 10
         The size of the new candidate points stack. Set it to 1
         to recalculate the best points at each call to `ask`.
@@ -180,7 +183,7 @@ class Learner2D(BaseLearner):
     -------
     data_combined : dict
         Sampled points and values so far including
-        the unknown interpolated ones.
+        the unknown interpolated points in `pending_points`.

     Notes
     -----
@@ -217,7 +220,7 @@ class Learner2D(BaseLearner):
         self.bounds = tuple((float(a), float(b)) for a, b in bounds)
         self.data = OrderedDict()
         self._stack = OrderedDict()
-        self._interp = set()
+        self.pending_points = set()

         self.xy_mean = np.mean(self.bounds, axis=1)
         self._xy_scale = np.ptp(self.bounds, axis=1)
@@ -263,14 +266,14 @@ class Learner2D(BaseLearner):
     @property
     def bounds_are_done(self):
-        return not any((p in self._interp or p in self._stack)
+        return not any((p in self.pending_points or p in self._stack)
                        for p in self._bounds_points)

     def data_combined(self):
         # Interpolate the unfinished points
         data_combined = copy(self.data)
-        if self._interp:
-            points_interp = list(self._interp)
+        if self.pending_points:
+            points_interp = list(self.pending_points)
             if self.bounds_are_done:
                 values_interp = self.ip()(self._scale(points_interp))
             else:
@@ -303,17 +306,17 @@ class Learner2D(BaseLearner):
         point = tuple(point)
         if value is None:
-            self._interp.add(point)
+            self.pending_points.add(point)
             self._ip_combined = None
         else:
             self.data[point] = value
-            self._interp.discard(point)
+            self.pending_points.discard(point)
             self._ip = None
         self._stack.pop(point, None)

     def _fill_stack(self, stack_till=1):
-        if len(self.data) + len(self._interp) < self.ndim + 1:
+        if len(self.data) + len(self.pending_points) < self.ndim + 1:
             raise ValueError("too few points...")

         # Interpolate
@@ -366,7 +369,7 @@ class Learner2D(BaseLearner):
         self._stack = OrderedDict(zip(points[:self.stack_size],
                                       loss_improvements))
         for point in points[:n]:
-            self._interp.discard(point)
+            self.pending_points.discard(point)

         return points[:n], loss_improvements[:n]
@@ -379,7 +382,7 @@ class Learner2D(BaseLearner):
         return self._loss

     def remove_unfinished(self):
-        self._interp = set()
+        self.pending_points = set()
         for p in self._bounds_points:
             if p not in self.data:
                 self._stack[p] = np.inf
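As a quick illustration of what this rename means for user code, here is a minimal sketch (not part of the diff; it assumes the top-level adaptive package exposes Learner2D as at this revision): telling a point with value None marks it as pending, and telling the real value later moves it into data and drops it from pending_points.

import numpy as np
from adaptive import Learner2D  # assumption: top-level package re-exports Learner2D

def ring(xy):
    # Illustrative 2D test function, not taken from the repository.
    x, y = xy
    return np.exp(-(x**2 + y**2 - 0.5)**2 / 0.01)

learner = Learner2D(ring, bounds=[(-1, 1), (-1, 1)])

point = (0.1, 0.2)
learner.tell(point, None)                 # value not known yet, so the point becomes pending
assert point in learner.pending_points    # this set was called 'learner._interp' before b034e437

learner.tell(point, ring(point))          # the evaluated value arrives
assert point not in learner.pending_points
assert point in learner.data              # now part of the sampled data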
adaptive/tests/test_learners.py (view file @ f846e1fb)
@@ -14,12 +14,6 @@ import pytest
 from ..learner import *
 from ..runner import simple, replay_log

-try:
-    import skopt
-    with_scikit_optimize = True
-except ModuleNotFoundError:
-    with_scikit_optimize = False
-

 def generate_random_parametrization(f):
     """Return a realization of 'f' with parameters bound to random values.
@@ -129,23 +123,7 @@ def ask_randomly(learner, rounds, points):
     return xs, ls


-@pytest.mark.skipif(not with_scikit_optimize,
-                    reason='scikit-optimize is not installed')
-def test_skopt_learner_runs():
-    """The SKOptLearner provides very few guarantees about its
-    behaviour, so we only test the most basic usage
-    """
-
-    def g(x, noise_level=0.1):
-        return (np.sin(5 * x) * (1 - np.tanh(x ** 2))
-                + np.random.randn() * noise_level)
-
-    learner = SKOptLearner(g, dimensions=[(-2., 2.)])
-
-    for _ in range(11):
-        (x,), _ = learner.ask(1)
-        learner.tell(x, learner.function(x))
-
-
 # Tests


 @run_with(Learner1D)
 def test_uniform_sampling1D(learner_type, f, learner_kwargs):
@@ -373,6 +351,37 @@ def test_learner_performance_is_invariant_under_scaling(learner_type, f, learner_kwargs):
     assert abs(learner.loss() - control.loss()) / learner.loss() < 1e-11


+# XXX: the LearnerND currently fails because there is no `add_data=False` argument in ask.
+@run_with(Learner1D, Learner2D, xfail(LearnerND), AverageLearner)
+def test_balancing_learner(learner_type, f, learner_kwargs):
+    """Test if the BalancingLearner works with the different types of learners."""
+    learners = [learner_type(generate_random_parametrization(f), **learner_kwargs)
+                for i in range(5)]
+
+    learner = BalancingLearner(learners)
+
+    # Emulate parallel execution
+    stash = []
+
+    for i in range(200):
+        xs, _ = learner.ask(10)
+
+        # Save 5 random points out of `xs` for later
+        random.shuffle(xs)
+        for _ in range(5):
+            stash.append(xs.pop())
+
+        for x in xs:
+            learner.tell(x, learner.function(x))
+
+        # Evaluate and add 5 random points from `stash`
+        random.shuffle(stash)
+        for _ in range(5):
+            learner.tell(stash.pop(), learner.function(x))
+
+    assert all(l.npoints > 20 for l in learner.learners)
+
+
 @pytest.mark.xfail
 @run_with(Learner1D, Learner2D, LearnerND)
 def test_convergence_for_arbitrary_ordering(learner_type, f, learner_kwargs):
@@ -392,22 +401,3 @@ def test_learner_subdomain(learner_type, f, learner_kwargs):
     perform 'similarly' to learners defined on that subdomain only.
     """
     # XXX: not sure how to implement this. How do we measure "performance"?
     raise NotImplementedError()
-
-
-def test_faiure_case_LearnerND():
-    log = [
-        ('ask', 4),
-        ('tell', (-1, -1, -1), 1.607873907219222e-101),
-        ('tell', (-1, -1, 1), 1.607873907219222e-101),
-        ('ask', 2),
-        ('tell', (-1, 1, -1), 1.607873907219222e-101),
-        ('tell', (-1, 1, 1), 1.607873907219222e-101),
-        ('ask', 2),
-        ('tell', (1, -1, 1), 2.0),
-        ('tell', (1, -1, -1), 2.0),
-        ('ask', 2),
-        ('tell', (0.0, 0.0, 0.0), 4.288304431237686e-06),
-        ('tell', (1, 1, -1), 2.0)
-    ]
-
-    learner = LearnerND(lambda *x: x, bounds=[(-1, 1), (-1, 1), (-1, 1)])
-    replay_log(learner, log)
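The regression test removed above (it already lives in test_learnernd.py) drives a learner through a recorded log via replay_log. A minimal sketch of that mechanism, using an illustrative Learner1D log rather than the one from the diff and assuming replay_log is importable from adaptive.runner:

from adaptive import Learner1D
from adaptive.runner import replay_log

# Each entry names a learner method followed by its positional arguments.
log = [
    ('ask', 2),           # ask for two points (the bounds come first)
    ('tell', -1.0, 0.0),
    ('tell', 1.0, 0.0),
    ('ask', 1),
    ('tell', 0.0, 1.0),
]

learner = Learner1D(lambda x: x, bounds=(-1, 1))
replay_log(learner, log)  # re-issues learner.ask(2), learner.tell(-1.0, 0.0), ... in order
assert learner.npoints == 3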
adaptive/tests/test_skopt_learner.py (new file, mode 100644, view file @ f846e1fb)
# -*- coding: utf-8 -*-

import random

import numpy as np
import pytest

try:
    import skopt
    with_scikit_optimize = True
    from ..learner import SKOptLearner
except ModuleNotFoundError:
    with_scikit_optimize = False


@pytest.mark.skipif(not with_scikit_optimize,
                    reason='scikit-optimize is not installed')
def test_skopt_learner_runs():
    """The SKOptLearner provides very few guarantees about its
    behaviour, so we only test the most basic usage
    """

    def g(x, noise_level=0.1):
        return (np.sin(5 * x) * (1 - np.tanh(x ** 2))
                + np.random.randn() * noise_level)

    learner = SKOptLearner(g, dimensions=[(-2., 2.)])

    for _ in range(11):
        (x,), _ = learner.ask(1)
        learner.tell(x, learner.function(x))