
LearnerND scale output values before computing loss

Merged Jorn Hoofwijk requested to merge 78-scale-output-values into master
All threads resolved!
2 files changed: +46 −12
@@ -176,7 +176,7 @@ class LearnerND(BaseLearner):
self._subtriangulations = dict() # simplex -> triangulation
# scale to unit hypercube
# for the input
self._transform = np.linalg.inv(np.diag(np.diff(bounds).flat))
# for the output
self._min_value = None
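Note on the hunk above: `self._transform` is the inverse of a diagonal matrix holding the widths of `bounds`, so multiplying a point by it rescales every input axis by 1/width (a pure scaling, no translation). A minimal sketch of that mapping, with made-up 2-D bounds:

```python
import numpy as np

bounds = [(0.0, 2.0), (-1.0, 4.0)]   # illustrative (min, max) per input dimension

# same construction as in the diff: inverse of diag(bound widths)
transform = np.linalg.inv(np.diag(np.diff(bounds).flat))

corner = np.array([2.0, 4.0])        # a corner of the bounding box
print(corner @ transform)            # [1.0, 0.8] -- each coordinate divided by its axis width
```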
@@ -450,7 +450,7 @@ class LearnerND(BaseLearner):
values = [self.data[tuple(v)] for v in vertices]
# scale them to a cube with sides 1
vertices = vertices @ self._transform
values = self._output_multiplier * values
# compute the loss on the scaled simplex
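The hunk above evaluates the loss on the rescaled simplex: vertices go through `self._transform` and values are multiplied by `self._output_multiplier`, so that neither the input nor the output range dominates. The sketch below illustrates the idea with a toy loss; `simplex_volume` and `scaled_loss` are hypothetical names, not adaptive's actual loss function:

```python
import math
import numpy as np

def simplex_volume(vertices):
    # volume of an n-simplex computed from its (n + 1) vertices
    v = np.asarray(vertices, dtype=float)
    edges = v[1:] - v[0]
    return abs(np.linalg.det(edges)) / math.factorial(len(edges))

def scaled_loss(vertices, values, transform, output_multiplier):
    # bring inputs to the unit hypercube and outputs to order 1, then
    # combine simplex size with the spread of the (scaled) values
    vertices = np.asarray(vertices, dtype=float) @ transform
    values = output_multiplier * np.asarray(values, dtype=float)
    return simplex_volume(vertices) * (1 + np.ptp(values))

transform = np.linalg.inv(np.diag([2.0, 5.0]))
tri = [(0.0, 0.0), (2.0, 0.0), (0.0, 5.0)]
print(scaled_loss(tri, [0.0, 1e6, 2e6], transform, output_multiplier=1e-6))  # ≈ 1.5
```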
@@ -461,7 +461,7 @@ class LearnerND(BaseLearner):
# amortized O(N) complexity
t_start = time.time()
if self.tri is None:
return
# reset the _simplex_queue
self._simplex_queue = []
@@ -470,7 +470,7 @@ class LearnerND(BaseLearner):
for simplex in self.tri.simplices:
loss = self.compute_loss(simplex)
self._losses[simplex] = loss
# now distribute it around the children if they are present
if simplex not in self._subtriangulations:
heapq.heappush(self._simplex_queue, (-loss, simplex, None))
@@ -478,7 +478,7 @@ class LearnerND(BaseLearner):
self._update_subsimplex_losses(
simplex, self._subtriangulations[simplex].simplices)
dt = time.time() - t_start
print(f"time spend recomputing loss={dt:.3f}")
@property
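For reference, `self._simplex_queue` is used as a max-priority queue on top of `heapq` (a min-heap), which is why losses are pushed negated: the simplex with the largest loss comes out first. A self-contained sketch of that pattern, with made-up simplices and losses:

```python
import heapq

def rebuild_queue(losses):
    # losses: dict mapping a simplex (tuple of vertex indices) -> loss
    queue = []
    for simplex, loss in losses.items():
        heapq.heappush(queue, (-loss, simplex))  # negate: heapq pops the smallest item
    return queue

queue = rebuild_queue({(0, 1, 2): 0.7, (1, 2, 3): 1.3, (2, 3, 4): 0.2})
neg_loss, simplex = heapq.heappop(queue)
print(simplex, -neg_loss)  # (1, 2, 3) 1.3 -- refine the worst simplex first
```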
@@ -506,7 +506,10 @@ class LearnerND(BaseLearner):
# never scale by multiplying with some huge number
if isinstance(c, float):
c = np.array([c], dtype=float)
c[c > 1e10] = 1
abs_max = np.max(np.abs([self._min_value, self._max_value]), axis=0)
c[new_scale / abs_max < 1e-10] = 1
self._output_multiplier = c
if scale_factor > self._recompute_loss_whenever_scale_changes_more_than:
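This last hunk swaps the hard cutoff `c[c > 1e10] = 1` for a relative test: if the spread of the output values (`new_scale`) is negligible compared to their magnitude (`abs_max`), the multiplier falls back to 1 instead of blowing up (e.g. a nearly constant function around 1e12). A standalone sketch of that guard, assuming the multiplier is roughly 1 / value-range; `output_multiplier` is a hypothetical helper, not adaptive's API:

```python
import numpy as np

def output_multiplier(min_value, max_value):
    min_value = np.atleast_1d(np.asarray(min_value, dtype=float))
    max_value = np.atleast_1d(np.asarray(max_value, dtype=float))
    new_scale = max_value - min_value
    abs_max = np.max(np.abs([min_value, max_value]), axis=0)
    with np.errstate(divide="ignore", invalid="ignore"):
        c = np.where(new_scale > 0, 1 / new_scale, 1.0)
        # never scale by multiplying with some huge number: a spread that is
        # tiny relative to the values themselves is numerical noise
        c[new_scale / abs_max < 1e-10] = 1
    return c

print(output_multiplier(0.0, 4.0))            # [0.25]
print(output_multiplier(1e12, 1e12 + 1e-3))   # [1.] -- without the guard this would be ~1e3
```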