Unverified Commit 44617453 authored by Stephan Rave, committed by GitHub

Merge pull request #1113 from pymor/compute

add compute() to Model
parents 8ee85829 a24bde0f
Pipeline #65870 passed with stages in 56 minutes and 41 seconds
......@@ -78,7 +78,7 @@ Solving the Model
Now that we have our FOM and a reduced space :math:`V_N` spanned by `basis`, we can project
the |Model|. However, before doing so, we need to understand how actually
solving the FOM works. Let's take a look at what
:meth:`~pymor.models.interface.Model.solve` actually does:
:meth:`~pymor.models.interface.Model.solve` does:
.. jupyter-execute::
......@@ -86,19 +86,26 @@ solving the FOM works. Let's take a look at what
print_source(fom.solve)
This does not look too interesting. Actually, :meth:`~pymor.models.interface.Model.solve`
is just a thin wrapper around the `_solve` method, which performs the actual
computations. All that :meth:`~pymor.models.interface.Model.solve` does is
check the input |parameter values| `mu` and :mod:`cache <pymor.core.cache>`
the results. So let's take a look at `_solve`:
is just a convenience method around :meth:`~pymor.models.interface.Model.compute` which
handles the actual computation of the solution and various other associated values like
outputs or error estimates. Next, we take a look at the implementation of
:meth:`~pymor.models.interface.Model.compute`:
.. jupyter-execute::
print_source(fom._solve)
print_source(fom.compute)
There is some code related to logging and the computation of an output functional.
The interesting line is::
What we see is a default implementation from :class:`~pymor.models.interface.Model` that
takes care of checking the input |parameter values| `mu`, :mod:`caching <pymor.core.cache>` and
:mod:`logging <pymor.core.logger>`, but defers the actual computations to further private methods.
Implementors can directly implement :meth:`~pymor.models.interface.Model._compute` to compute
multiple return values at once in an optimized way. Our given model, however, just implements
:meth:`~pymor.models.interface.Model._compute_solution` where we can find the
actual code:
U = self.operator.apply_inverse(self.rhs.as_range_array(mu), mu=mu)
.. jupyter-execute::
print_source(fom._compute_solution)
What does this mean? If we look at the type of `fom`,
......@@ -160,12 +167,13 @@ Let's try solving the model on our own:
That did not work too well! In pyMOR, all parametric objects expect the
`mu` argument to be an instance of the :class:`~pymor.parameters.base.Mu`
class. :meth:`~pymor.models.interface.Model.solve` is an exception: for
convenience, it accepts as a `mu` argument anything that can be converted
class. :meth:`~pymor.models.interface.Model.compute` and related methods
like :meth:`~pymor.models.interface.Model.solve` are an exception: for
convenience, they accept as a `mu` argument anything that can be converted
to a :class:`~pymor.parameters.base.Mu` instance using the
:meth:`~pymor.parameters.base.Parameters.parse` method of the
:class:`~pymor.parameters.base.Parameters` class. In fact, if you look
back at the implementation of :meth:`~pymor.models.interface.Model.solve`,
back at the implementation of :meth:`~pymor.models.interface.Model.compute`,
you see the explicit call to :meth:`~pymor.parameters.base.Parameters.parse`.
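For instance, assuming our model had a single scalar parameter named ``'diffusion'``
(a hypothetical name, used here only for illustration), the following calls would all
be equivalent::

    mu = fom.parameters.parse({'diffusion': 0.5})  # build a Mu instance explicitly
    fom.solve(mu)
    fom.solve({'diffusion': 0.5})  # the dict is parsed to a Mu instance by compute()
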
We try again:
......
......@@ -333,9 +333,10 @@ def _compute_errors(mu, fom, reductor, error_estimator, error_norms, condition,
for i_N, N in enumerate(basis_sizes):
rom = reductor.reduce(dims={k: N for k in reductor.bases})
u = rom.solve(mu)
result = rom.compute(solution=True, solution_error_estimate=error_estimator, mu=mu)
u = result['solution']
if error_estimator:
e = rom.estimate_error(u, mu)
e = result['solution_error_estimate']
e = e[0] if hasattr(e, '__len__') else e
error_estimates[i_N] = e
if fom and reductor:
......
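The changed loop above shows the main benefit of the new interface: a single
``compute()`` call returns the solution together with its error estimate, instead of
a separate ``solve()`` followed by ``estimate_error()``. A minimal sketch of the
pattern (``rom`` and ``mu`` denote any reduced model and compatible parameter values)::

    data = rom.compute(solution=True, solution_error_estimate=True, mu=mu)
    u = data['solution']                    # the reduced solution
    est = data['solution_error_estimate']   # estimate for that same solution
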
......@@ -268,7 +268,7 @@ def _rb_surrogate_evaluate(rom=None, fom=None, reductor=None, mus=None, error_no
return -1., None
if fom is None:
errors = [rom.estimate_error(rom.solve(mu), mu) for mu in mus]
errors = [rom.estimate_error(mu) for mu in mus]
elif error_norm is not None:
errors = [error_norm(fom.solve(mu) - reductor.reconstruct(rom.solve(mu))) for mu in mus]
else:
......
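Note the changed signature of ``estimate_error()``: the already computed solution is
no longer passed in, since ``compute()`` now takes care of solving (and caching)
internally. A before/after sketch of the call sites touched here::

    # old interface: solve first, then hand the solution to the estimator
    e = rom.estimate_error(rom.solve(mu), mu)
    # new interface: only the parameter values are required
    e = rom.estimate_error(mu)
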
......@@ -84,18 +84,8 @@ class StationaryModel(Model):
f' output_space: {self.output_space}\n'
)
def _solve(self, mu=None, return_output=False):
# explicitly checking if logging is disabled saves the str(mu) call
if not self.logging_disabled:
self.logger.info(f'Solving {self.name} for {mu} ...')
U = self.operator.apply_inverse(self.rhs.as_range_array(mu), mu=mu)
if return_output:
if self.output_functional is None:
raise ValueError('Model has no output')
return U, self.output_functional.apply(U, mu=mu)
else:
return U
def _compute_solution(self, mu=None, **kwargs):
return self.operator.apply_inverse(self.rhs.as_range_array(mu), mu=mu)
class InstationaryModel(Model):
......@@ -195,21 +185,12 @@ class InstationaryModel(Model):
def with_time_stepper(self, **kwargs):
return self.with_(time_stepper=self.time_stepper.with_(**kwargs))
def _solve(self, mu=None, return_output=False):
# explicitly checking if logging is disabled saves the expensive str(mu) call
if not self.logging_disabled:
self.logger.info(f'Solving {self.name} for {mu} ...')
def _compute_solution(self, mu=None, **kwargs):
mu = mu.with_(t=0.)
U0 = self.initial_data.as_range_array(mu)
U = self.time_stepper.solve(operator=self.operator, rhs=self.rhs, initial_data=U0, mass=self.mass,
initial_time=0, end_time=self.T, mu=mu, num_values=self.num_values)
if return_output:
if self.output_functional is None:
raise ValueError('Model has no output')
return U, self.output_functional.apply(U, mu=mu)
else:
return U
return U
def to_lti(self):
"""Convert model to |LTIModel|.
......
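The ``return_output`` branches removed from both ``_solve`` implementations are not
lost: output handling now lives once in the base class, whose default
``_compute_output`` applies ``output_functional`` to the computed solution (see the
``models/interface.py`` hunk below). For a ``StationaryModel``, the default code path
is roughly equivalent to the following sketch (not the literal implementation)::

    U = m.operator.apply_inverse(m.rhs.as_range_array(mu), mu=mu)  # _compute_solution
    if m.output_functional is None:
        raise ValueError('Model has no output')
    out = m.output_functional.apply(U, mu=mu)                      # default _compute_output
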
......@@ -8,6 +8,7 @@ from pymor.operators.constructions import induced_norm
from pymor.parameters.base import ParametricObject, Mu
from pymor.tools.frozendict import FrozenDict
from pymor.tools.deprecated import Deprecated
from pymor.vectorarrays.interface import VectorArray
class Model(CacheableObject, ParametricObject):
......@@ -47,51 +48,312 @@ class Model(CacheableObject, ParametricObject):
self.__auto_init(locals())
@abstractmethod
def _solve(self, mu=None, return_output=False, **kwargs):
"""Perform the actual solving."""
pass
def _compute(self, solution=False, output=False,
solution_error_estimate=False, output_error_estimate=False,
mu=None, **kwargs):
return {}
def solve(self, mu=None, return_output=False, **kwargs):
"""Solve the discrete problem for the |parameter values| `mu`.
def _compute_solution(self, mu=None, **kwargs):
"""Compute the model's solution for |parameter values| `mu`.
The result will be :mod:`cached <pymor.core.cache>`
in case caching has been activated for the given model.
This method is called by the default implementation of :meth:`compute`
in :class:`pymor.models.interface.Model`.
Parameters
----------
mu
|Parameter values| for which to solve.
return_output
If `True`, the model output for the given |parameter values| `mu` is
returned as a |VectorArray| from :attr:`output_space`.
|Parameter values| for which to compute the solution.
kwargs
Additional keyword arguments to customize how the solution is
computed or to select additional data to be returned.
Returns
-------
The solution |VectorArray|. When `return_output` is `True`,
the output |VectorArray| is returned as second value.
|VectorArray| with the computed solution or a dict which at least
must contain the key `'solution'`.
"""
raise NotImplementedError
def _compute_output(self, solution, mu=None, **kwargs):
"""Compute the model's output for |parameter values| `mu`.
This method is called by the default implementation of :meth:`compute`
in :class:`pymor.models.interface.Model`. The assumption is made
that the output is a derived quantity from the model's internal state
as returned by :meth:`_compute_solution`. When this is not the case,
the computation of the output should be implemented in :meth:`_compute`.
.. note::
The default implementation applies the |Operator| given by the
:attr:`output_functional` attribute to the given `solution`
|VectorArray|.
Parameters
----------
solution
Internal model state for the given |parameter values|.
mu
|Parameter values| for which to compute the output.
kwargs
Additional keyword arguments to customize how the output is
computed or to select additional data to be returned.
Returns
-------
|VectorArray| with the computed output or a dict which at least
must contain the key `'output'`.
"""
if not hasattr(self, 'output_functional'):
raise NotImplementedError
if self.output_functional is None:
raise ValueError('Model has no output')
return self.output_functional.apply(solution, mu=mu)
def _compute_solution_error_estimate(self, solution, mu=None, **kwargs):
"""Compute an error estimate for the computed internal state.
This method is called by the default implementation of :meth:`compute`
in :class:`pymor.models.interface.Model`. The assumption is made
that the error estimate is a derived quantity from the model's internal state
as returned by :meth:`_compute_solution`. When this is not the case,
the computation of the error estimate should be implemented in :meth:`_compute`.
.. note::
The default implementation calls the `estimate_error` method of the object
given by the :attr:`error_estimator` attribute, passing `solution`,
`mu`, `self` and `**kwargs`.
Parameters
----------
solution
Internal model state for the given |parameter values|.
mu
|Parameter values| for which to compute the error estimate.
kwargs
Additional keyword arguments to customize how the error estimate is
computed or to select additional data to be returned.
Returns
-------
The computed error estimate or a dict which at least must contain the key
`'solution_error_estimate'`.
"""
if self.error_estimator is None:
raise ValueError('Model has no error estimator')
return self.error_estimator.estimate_error(solution, mu, self, **kwargs)
def _compute_output_error_estimate(self, solution, mu=None, **kwargs):
"""Compute an error estimate for the computed model output.
This method is called by the default implementation of :meth:`compute`
in :class:`pymor.models.interface.Model`. The assumption is made
that the error estimate is a derived quantity from the model's internal state
as returned by :meth:`_compute_solution`. When this is not the case,
the computation of the error estimate should be implemented in :meth:`_compute`.
.. note::
The default implementation calls the `estimate_output_error` method of the object
given by the :attr:`error_estimator` attribute, passing `solution`,
`mu`, `self` and `**kwargs`.
Parameters
----------
solution
Internal model state for the given |parameter values|.
mu
|Parameter values| for which to compute the error estimate.
kwargs
Additional keyword arguments to customize how the error estimate is
computed or to select additional data to be returned.
Returns
-------
The computed error estimate or a dict which at least must contain the key
`'output_error_estimate'`.
"""
if self.error_estimator is None:
raise ValueError('Model has no error estimator')
return self.error_estimator.estimate_output_error(solution, mu, self, **kwargs)
_compute_allowed_kwargs = frozenset()
def compute(self, solution=False, output=False,
solution_error_estimate=False, output_error_estimate=False, *,
mu=None, **kwargs):
"""Compute the solution of the model and associated quantities.
This method computes the output of the model, its internal state, and various
other associated quantities for given |parameter values| `mu`.
.. note::
The default implementation defers the actual computations to
the methods :meth:`_compute_solution`, :meth:`_compute_output`,
:meth:`_compute_solution_error_estimate` and :meth:`_compute_output_error_estimate`.
The call to :meth:`_compute_solution` is :mod:`cached <pymor.core.cache>`.
In addition, |Model| implementors may implement :meth:`_compute` to
simultaneously compute multiple values in an optimized way. The corresponding
`_compute_XXX` methods will not be called for values already returned by
:meth:`_compute`.
Parameters
----------
solution
If `True`, return the model's internal state.
output
If `True`, return the model output.
solution_error_estimate
If `True`, return an error estimate for the computed internal state.
output_error_estimate
If `True`, return an error estimate for the computed output.
mu
|Parameter values| for which to compute the values.
kwargs
Further keyword arguments to select additional quantities that should
be returned or to customize how the values are computed.
Returns
-------
A dict with the computed values.
"""
# make sure no unknown kwargs are passed
assert kwargs.keys() <= self._compute_allowed_kwargs
# parse parameter values
if not isinstance(mu, Mu):
mu = self.parameters.parse(mu)
assert self.parameters.assert_compatible(mu)
return self.cached_method_call(self._solve, mu=mu, return_output=return_output, **kwargs)
def output(self, mu=None, **kwargs):
# log output
# explicitly checking if logging is disabled saves some cpu cycles
if not self.logging_disabled:
self.logger.info(f'Solving {self.name} for {mu} ...')
# first call _compute to give subclasses more control
data = self._compute(solution=solution, output=output,
solution_error_estimate=solution_error_estimate,
output_error_estimate=output_error_estimate,
mu=mu, **kwargs)
if (solution or output or solution_error_estimate or output_error_estimate) and \
'solution' not in data:
retval = self.cached_method_call(self._compute_solution, mu=mu, **kwargs)
if isinstance(retval, dict):
assert 'solution' in retval
data.update(retval)
else:
data['solution'] = retval
if output and 'output' not in data:
# TODO use caching here (requires skipping args in key generation)
retval = self._compute_output(data['solution'], mu=mu, **kwargs)
if isinstance(retval, dict):
assert 'output' in retval
data.update(retval)
else:
data['output'] = retval
if solution_error_estimate and 'solution_error_estimate' not in data:
# TODO use caching here (requires skipping args in key generation)
retval = self._compute_solution_error_estimate(data['solution'], mu=mu, **kwargs)
if isinstance(retval, dict):
assert 'solution_error_estimate' in retval
data.update(retval)
else:
data['solution_error_estimate'] = retval
if output_error_estimate and 'output_error_estimate' not in data:
# TODO use caching here (requires skipping args in key generation)
retval = self._compute_output_error_estimate(data['solution'], mu=mu, **kwargs)
if isinstance(retval, dict):
assert 'output_error_estimate' in retval
data.update(retval)
else:
data['output_error_estimate'] = retval
return data
def solve(self, mu=None, return_error_estimate=False, **kwargs):
"""Solve the discrete problem for the |parameter values| `mu`.
This method returns a |VectorArray| with an internal state
representation of the model's solution for given
|parameter values|. It is a convenience wrapper around
:meth:`compute`.
The result may be :mod:`cached <pymor.core.cache>`
in case caching has been activated for the given model.
Parameters
----------
mu
|Parameter values| for which to solve.
return_error_estimate
If `True`, also return an error estimate for the computed solution.
kwargs
Additional keyword arguments passed to :meth:`compute` that
might affect how the solution is computed.
Returns
-------
The solution |VectorArray|. When `return_error_estimate` is `True`,
the estimate is returned as second value.
"""
data = self.compute(
solution=True,
solution_error_estimate=return_error_estimate,
mu=mu,
**kwargs
)
if return_error_estimate:
return data['solution'], data['solution_error_estimate']
else:
return data['solution']
def output(self, mu=None, return_error_estimate=False, **kwargs):
"""Return the model output for given |parameter values| `mu`.
This method is a convenience wrapper around :meth:`compute`.
Parameters
----------
mu
|Parameter values| for which to compute the output.
return_error_estimate
If `True`, also return an error estimate for the computed output.
kwargs
Additional keyword arguments passed to :meth:`compute` that
might affect how the output (or the underlying solution) is computed.
Returns
-------
The computed model output as a |VectorArray| from `output_space`.
When `return_error_estimate` is `True`, the estimate is returned as
second value.
"""
return self.solve(mu=mu, return_output=True, **kwargs)[1]
data = self.compute(
output=True,
output_error_estimate=return_error_estimate,
mu=mu,
**kwargs
)
if return_error_estimate:
return data['output'], data['output_error_estimate']
else:
return data['output']
def estimate_error(self, U, mu=None):
"""Estimate the model error for a given solution.
def estimate_error(self, mu=None, **kwargs):
"""Estimate the error for the computed internal state.
For given |parameter values| `mu` this method returns an
error estimate for the computed internal model state as returned
by :meth:`solve`. It is a convenience wrapper around
:meth:`compute`.
The model error could be the error w.r.t. the analytical
solution of the given problem or the model reduction error w.r.t.
......@@ -99,35 +361,67 @@ class Model(CacheableObject, ParametricObject):
Parameters
----------
U
The solution obtained by :meth:`~solve`.
mu
|Parameter values| for which `U` has been obtained.
|Parameter values| for which to estimate the error.
kwargs
Additional keyword arguments passed to :meth:`compute` that
might affect how the error estimate (or the solution) is computed.
Returns
-------
The estimated error.
"""
if getattr(self, 'error_estimator') is not None:
return self.error_estimator.estimate_error(U, mu=mu, m=self)
else:
raise NotImplementedError('Model has no error estimator.')
return self.compute(
solution_error_estimate=True,
mu=mu,
**kwargs
)['solution_error_estimate']
@Deprecated('estimate_error')
def estimate(self, U, mu=None):
return self.estimate_error(U, mu)
return self.estimate_error(mu)
def estimate_output_error(self, mu=None, **kwargs):
"""Estimate the error for the computed output.
For given |parameter values| `mu` this method returns an
error estimate for the computed model output as returned
by :meth:`output`. It is a convenience wrapper around
:meth:`compute`.
The output error could be the error w.r.t. the analytical
solution of the given problem or the model reduction error w.r.t.
a corresponding high-dimensional |Model|.
Parameters
----------
mu
|Parameter values| for which to estimate the error.
kwargs
Additional keyword arguments passed to :meth:`compute` that
might affect how the error estimate (or the output) is computed.
Returns
-------
The estimated error.
"""
return self.compute(
output_error_estimate=True,
mu=mu,
**kwargs
)['output_error_estimate']
def visualize(self, U, **kwargs):
"""Visualize a solution |VectorArray| U.
"""Visualize a |VectorArray| U of the model's :attr:`solution_space`.
Parameters
----------
U
The |VectorArray| from
:attr:`~pymor.models.interface.Model.solution_space`
The |VectorArray| from :attr:`solution_space`
that shall be visualized.
kwargs
See docstring of `self.visualizer.visualize`.
Additional keyword arguments to customize the visualization.
See the docstring of `self.visualizer.visualize`.
"""
if getattr(self, 'visualizer') is not None:
return self.visualizer.visualize(U, self, **kwargs)
......
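In summary, ``compute()`` is now the single entry point that returns a dict of all
requested quantities, while ``solve()``, ``output()``, ``estimate_error()`` and
``estimate_output_error()`` are thin convenience wrappers around it. A usage sketch
(assuming ``m`` is a model with an output functional and an error estimator)::

    data = m.compute(solution=True, output=True,
                     solution_error_estimate=True, output_error_estimate=True, mu=mu)
    U, out = data['solution'], data['output']

    # equivalent convenience calls
    U = m.solve(mu)
    out, out_est = m.output(mu, return_error_estimate=True)
    est = m.estimate_error(mu)
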
......@@ -45,9 +45,6 @@ class InputOutputModel(Model):
def output_dim(self):
return self.output_space.dim
def _solve(self, mu=None):
raise NotImplementedError
def eval_tf(self, s, mu=None):
"""Evaluate the transfer function."""
raise NotImplementedError
......
......@@ -36,9 +36,9 @@ class MPIModel:
self.parameters_internal = m.parameters_internal
self.visualizer = MPIVisualizer(obj_id)
def _solve(self, mu=None):
def _compute_solution(self, mu=None, **kwargs):
return self.solution_space.make_array(
mpi.call(mpi.method_call_manage, self.obj_id, 'solve', mu=mu)
mpi.call(mpi.method_call_manage, self.obj_id, '_compute_solution', mu=mu, **kwargs)
)
def visualize(self, U, **kwargs):
......
......@@ -62,9 +62,7 @@ if config.HAVE_TORCH:
if output_functional is not None:
self.output_space = output_functional.range
def _solve(self, mu=None, return_output=False):
if not self.logging_disabled:
self.logger.info(f'Solving {self.name} for {mu} ...')
def _compute_solution(self, mu=None, **kwargs):
# convert the parameter `mu` into a form that is usable in PyTorch
converted_input = torch.from_numpy(mu.to_numpy()).double()
......@@ -73,13 +71,7 @@ if config.HAVE_TORCH:
# convert plain numpy array to element of the actual solution space
U = self.solution_space.make_array(U)
if return_output:
if self.output_functional is None:
raise ValueError('Model has no output')
return U, self.output_functional.apply(U, mu=mu)
else:
return U
return U
class FullyConnectedNN(nn.Module, BasicObject):
"""Class for neural networks with fully connected layers.
......
......@@ -72,10 +72,10 @@ def analyze_pickle_histogram(args):
if hasattr(rom, 'estimate'):
ests = []
for u, mu in zip(us, mus):
for mu in mus:
print(f'Estimating error for {mu} ... ', end='')
sys.stdout.flush()
ests.append(rom.estimate_error(u, mu=mu))
ests.append(rom.estimate_error(mu))
print('done')