diff --git a/docs/source/tutorial_projection.rst b/docs/source/tutorial_projection.rst index aadb3c617ab3a9f7ba925e1698a293efde964a7f..b875df8ce6558d439e7cf52960285b90c72ee948 100644 --- a/docs/source/tutorial_projection.rst +++ b/docs/source/tutorial_projection.rst @@ -78,7 +78,7 @@ Solving the Model Now that we have our FOM and a reduced space :math:`V_N` spanned by `basis`, we can project the |Model|. However, before doing so, we need to understand how actually solving the FOM works. Let's take a look at what -:meth:`~pymor.models.interface.Model.solve` actually does: +:meth:`~pymor.models.interface.Model.solve` does: .. jupyter-execute:: @@ -86,19 +86,26 @@ solving the FOM works. Let's take a look at what print_source(fom.solve) This does not look too interesting. Actually, :meth:`~pymor.models.interface.Model.solve` -is just a thin wrapper around the `_solve` method, which performs the actual -computations. All that :meth:`~pymor.models.interface.Model.solve` does is -checking the input |parameter values| `mu` and :mod:`caching ` -the results. So let's take a look at `_solve`: +is just a convenience method around :meth:`~pymor.models.interface.Model.compute` which +handles the actual computation of the solution and various other associated values like +outputs or error estimates. Next, we take a look at the implementation of +:meth:`~pymor.models.interface.Model.compute`: .. jupyter-execute:: - print_source(fom._solve) + print_source(fom.compute) -There is some code related to logging and the computation of an output functional. -The interesting line is:: +What we see is a default implementation from :class:`~pymor.models.interface.Model` that +takes care of checking the input |parameter values| `mu`, :mod:`caching ` and +:mod:`logging `, but defers the actual computations to further private methods. +Implementors can directly implement :meth:`~pymor.models.interface.Model._compute` to compute +multiple return values at once in an optimized way. 
Our given model, however, just implements +:meth:`~pymor.models.interface.Model._compute_solution` where we can find the +actual code: - U = self.operator.apply_inverse(self.rhs.as_range_array(mu), mu=mu) +.. jupyter-execute:: + + print_source(fom._compute_solution) What does this mean? If we look at the type of `fom`, @@ -160,12 +167,13 @@ Let's try solving the model on our own: That did not work too well! In pyMOR, all parametric objects expect the `mu` argument to be an instance of the :class:`~pymor.parameters.base.Mu` -class. :meth:`~pymor.models.interface.Model.solve` is an exception: for -convenience, it accepts as a `mu` argument anything that can be converted +class. :meth:`~pymor.models.interface.Model.compute` and related methods +like :meth:`~pymor.models.interface.Model.solve` are an exception: for +convenience, they accept as a `mu` argument anything that can be converted to a :class:`~pymor.parameters.base.Mu` instance using the :meth:`~pymor.parameters.base.Parameters.parse` method of the :class:`~pymor.parameters.base.Parameters` class. In fact, if you look -back at the implementation of :meth:`~pymor.models.interface.Model.solve`, +back at the implementation of :meth:`~pymor.models.interface.Model.compute`, you see the explicit call to :meth:`~pymor.parameters.base.Parameters.parse`. 
We try again: diff --git a/src/pymor/algorithms/error.py b/src/pymor/algorithms/error.py index 2f6362d6b9094e3d0f667e8c83d1b0db3932b324..33a578ddd293acae1e6c15638ef09cb5fa405d86 100644 --- a/src/pymor/algorithms/error.py +++ b/src/pymor/algorithms/error.py @@ -333,9 +333,10 @@ def _compute_errors(mu, fom, reductor, error_estimator, error_norms, condition, for i_N, N in enumerate(basis_sizes): rom = reductor.reduce(dims={k: N for k in reductor.bases}) - u = rom.solve(mu) + result = rom.compute(solution=True, solution_error_estimate=error_estimator, mu=mu) + u = result['solution'] if error_estimator: - e = rom.estimate_error(u, mu) + e = result['solution_error_estimate'] e = e[0] if hasattr(e, '__len__') else e error_estimates[i_N] = e if fom and reductor: diff --git a/src/pymor/algorithms/greedy.py b/src/pymor/algorithms/greedy.py index 2e75877a8abcf8d8355988b9a45dd4fdf29af4fe..290574e07732cdf1ef3b6a8b3b7579ffd1c2ed26 100644 --- a/src/pymor/algorithms/greedy.py +++ b/src/pymor/algorithms/greedy.py @@ -268,7 +268,7 @@ def _rb_surrogate_evaluate(rom=None, fom=None, reductor=None, mus=None, error_no return -1., None if fom is None: - errors = [rom.estimate_error(rom.solve(mu), mu) for mu in mus] + errors = [rom.estimate_error(mu) for mu in mus] elif error_norm is not None: errors = [error_norm(fom.solve(mu) - reductor.reconstruct(rom.solve(mu))) for mu in mus] else: diff --git a/src/pymor/models/basic.py b/src/pymor/models/basic.py index 7c98d3cd754ac656e20fdae947dd390702f07af9..80b2608dc967889542bc110295b8008477ee2f38 100644 --- a/src/pymor/models/basic.py +++ b/src/pymor/models/basic.py @@ -84,18 +84,8 @@ class StationaryModel(Model): f' output_space: {self.output_space}\n' ) - def _solve(self, mu=None, return_output=False): - # explicitly checking if logging is disabled saves the str(mu) call - if not self.logging_disabled: - self.logger.info(f'Solving {self.name} for {mu} ...') - - U = self.operator.apply_inverse(self.rhs.as_range_array(mu), mu=mu) - if 
return_output: - if self.output_functional is None: - raise ValueError('Model has no output') - return U, self.output_functional.apply(U, mu=mu) - else: - return U + def _compute_solution(self, mu=None, **kwargs): + return self.operator.apply_inverse(self.rhs.as_range_array(mu), mu=mu) class InstationaryModel(Model): @@ -195,21 +185,12 @@ class InstationaryModel(Model): def with_time_stepper(self, **kwargs): return self.with_(time_stepper=self.time_stepper.with_(**kwargs)) - def _solve(self, mu=None, return_output=False): - # explicitly checking if logging is disabled saves the expensive str(mu) call - if not self.logging_disabled: - self.logger.info(f'Solving {self.name} for {mu} ...') - + def _compute_solution(self, mu=None, **kwargs): mu = mu.with_(t=0.) U0 = self.initial_data.as_range_array(mu) U = self.time_stepper.solve(operator=self.operator, rhs=self.rhs, initial_data=U0, mass=self.mass, initial_time=0, end_time=self.T, mu=mu, num_values=self.num_values) - if return_output: - if self.output_functional is None: - raise ValueError('Model has no output') - return U, self.output_functional.apply(U, mu=mu) - else: - return U + return U def to_lti(self): """Convert model to |LTIModel|. 
diff --git a/src/pymor/models/interface.py b/src/pymor/models/interface.py index 85264f668f61e151124219cf21836f2e7107a645..4bb3211a61d0130763b57a3a881d4101591690c6 100644 --- a/src/pymor/models/interface.py +++ b/src/pymor/models/interface.py @@ -8,6 +8,7 @@ from pymor.operators.constructions import induced_norm from pymor.parameters.base import ParametricObject, Mu from pymor.tools.frozendict import FrozenDict from pymor.tools.deprecated import Deprecated +from pymor.vectorarrays.interface import VectorArray class Model(CacheableObject, ParametricObject): @@ -47,51 +48,312 @@ class Model(CacheableObject, ParametricObject): self.__auto_init(locals()) - @abstractmethod - def _solve(self, mu=None, return_output=False, **kwargs): - """Perform the actual solving.""" - pass + def _compute(self, solution=False, output=False, + solution_error_estimate=False, output_error_estimate=False, + mu=None, **kwargs): + return {} - def solve(self, mu=None, return_output=False, **kwargs): - """Solve the discrete problem for the |parameter values| `mu`. + def _compute_solution(self, mu=None, **kwargs): + """Compute the model's solution for |parameter values| `mu`. - The result will be :mod:`cached ` - in case caching has been activated for the given model. + This method is called by the default implementation of :meth:`compute` + in :class:`pymor.models.interface.Model`. Parameters ---------- mu - |Parameter values| for which to solve. - return_output - If `True`, the model output for the given |parameter values| `mu` is - returned as a |VectorArray| from :attr:`output_space`. + |Parameter values| for which to compute the solution. + kwargs + Additional keyword arguments to customize how the solution is + computed or to select additional data to be returned. Returns ------- - The solution |VectorArray|. When `return_output` is `True`, - the output |VectorArray| is returned as second value. 
+ |VectorArray| with the computed solution or a dict which at least + must contain the key `'solution'`. """ + raise NotImplementedError + + def _compute_output(self, solution, mu=None, **kwargs): + """Compute the model's output for |parameter values| `mu`. + + This method is called by the default implementation of :meth:`compute` + in :class:`pymor.models.interface.Model`. The assumption is made + that the output is a derived quantity from the model's internal state + as returned by :meth:`_compute_solution`. When this is not the case, + the computation of the output should be implemented in :meth:`_compute`. + + .. note:: + + The default implementation applies the |Operator| given by the + :attr:`output_functional` attribute to the given `solution` + |VectorArray|. + + Parameters + ---------- + solution + Internal model state for the given |parameter values|. + mu + |Parameter values| for which to compute the output. + kwargs + Additional keyword arguments to customize how the output is + computed or to select additional data to be returned. + + Returns + ------- + |VectorArray| with the computed output or a dict which at least + must contain the key `'output'`. + """ + if not hasattr(self, 'output_functional'): + raise NotImplementedError + if self.output_functional is None: + raise ValueError('Model has no output') + return self.output_functional.apply(solution, mu=mu) + + def _compute_solution_error_estimate(self, solution, mu=None, **kwargs): + """Compute an error estimate for the computed internal state. + + This method is called by the default implementation of :meth:`compute` + in :class:`pymor.models.interface.Model`. The assumption is made + that the error estimate is a derived quantity from the model's internal state + as returned by :meth:`_compute_solution`. When this is not the case, + the computation of the error estimate should be implemented in :meth:`_compute`. + + .. 
note:: + + The default implementation calls the `estimate_error` method of the object + given by the :attr:`error_estimator` attribute, passing `solution`, + `mu`, `self` and `**kwargs`. + + Parameters + ---------- + solution + Internal model state for the given |parameter values|. + mu + |Parameter values| for which to compute the error estimate. + kwargs + Additional keyword arguments to customize how the error estimate is + computed or to select additional data to be returned. + + Returns + ------- + The computed error estimate or a dict which at least must contain the key + `'solution_error_estimate'`. + """ + if self.error_estimator is None: + raise ValueError('Model has no error estimator') + return self.error_estimator.estimate_error(solution, mu, self, **kwargs) + + def _compute_output_error_estimate(self, solution, mu=None, **kwargs): + """Compute an error estimate for the computed model output. + + This method is called by the default implementation of :meth:`compute` + in :class:`pymor.models.interface.Model`. The assumption is made + that the error estimate is a derived quantity from the model's internal state + as returned by :meth:`_compute_solution`. When this is not the case, + the computation of the error estimate should be implemented in :meth:`_compute`. + + .. note:: + + The default implementation calls the `estimate_output_error` method of the object + given by the :attr:`error_estimator` attribute, passing `solution`, + `mu`, `self` and `**kwargs`. + + Parameters + ---------- + solution + Internal model state for the given |parameter values|. + mu + |Parameter values| for which to compute the error estimate. + kwargs + Additional keyword arguments to customize how the error estimate is + computed or to select additional data to be returned. + + Returns + ------- + The computed error estimate or a dict which at least must contain the key + `'output_error_estimate'`. 
+ """ + if self.error_estimator is None: + raise ValueError('Model has no error estimator') + return self.error_estimator.estimate_output_error(solution, mu, self, **kwargs) + + _compute_allowed_kwargs = frozenset() + + def compute(self, solution=False, output=False, + solution_error_estimate=False, output_error_estimate=False, *, + mu=None, **kwargs): + """Compute the solution of the model and associated quantities. + + This methods the output of the model it's internal state + and various associated quantities for given |parameter values| + `mu`. + + .. note:: + + The default implementation defers the actual computations to + the methods :meth:`_compute_solution`, :meth:`_compute_output`, + :meth:`_compute_solution_error_estimate` and :meth:`_compute_output_error_estimate`. + The call to :meth:`_compute_solution` is :mod:`cached `. + In addition, |Model| implementors may implement :meth:`_compute` to + simultaneously compute multiple values in an optimized way. The corresponding + `_compute_XXX` methods will not be called for values already returned by + :meth:`_compute`. + + Parameters + ---------- + solution + If `True`, return the model's internal state. + output + If `True`, return the model output. + solution_error_estimate + If `True`, return an error estimate for the computed internal state. + output_error_estimate + If `True`, return an error estimate for the computed output. + mu + |Parameter values| for which to compute the values. + kwargs + Further keyword arguments to select further quantities that sould + be returned or to customize how the values are computed. + + Returns + ------- + A dict with the computed values. 
+ """ + + # make sure no unknown kwargs are passed + assert kwargs.keys() <= self._compute_allowed_kwargs + + # parse parameter values if not isinstance(mu, Mu): mu = self.parameters.parse(mu) assert self.parameters.assert_compatible(mu) - return self.cached_method_call(self._solve, mu=mu, return_output=return_output, **kwargs) - def output(self, mu=None, **kwargs): + # log output + # explicitly checking if logging is disabled saves some cpu cycles + if not self.logging_disabled: + self.logger.info(f'Solving {self.name} for {mu} ...') + + # first call _compute to give subclasses more control + data = self._compute(solution=solution, output=output, + solution_error_estimate=solution_error_estimate, + output_error_estimate=output_error_estimate, + mu=mu, **kwargs) + + if (solution or output or solution_error_estimate or output_error_estimate) and \ + 'solution' not in data: + retval = self.cached_method_call(self._compute_solution, mu=mu, **kwargs) + if isinstance(retval, dict): + assert 'solution' in retval + data.update(retval) + else: + data['solution'] = retval + + if output and 'output' not in data: + # TODO use caching here (requires skipping args in key generation) + retval = self._compute_output(data['solution'], mu=mu, **kwargs) + if isinstance(retval, dict): + assert 'output' in retval + data.update(retval) + else: + data['output'] = retval + + if solution_error_estimate and 'solution_error_estimate' not in data: + # TODO use caching here (requires skipping args in key generation) + retval = self._compute_solution_error_estimate(data['solution'], mu=mu, **kwargs) + if isinstance(retval, dict): + assert 'solution_error_estimate' in retval + data.update(retval) + else: + data['solution_error_estimate'] = retval + + if output_error_estimate and 'output_error_estimate' not in data: + # TODO use caching here (requires skipping args in key generation) + retval = self._compute_output_error_estimate(data['solution'], mu=mu, **kwargs) + if isinstance(retval, dict): 
+ assert 'output_error_estimate' in retval + data.update(retval) + else: + data['output_error_estimate'] = retval + + return data + + def solve(self, mu=None, return_error_estimate=False, **kwargs): + """Solve the discrete problem for the |parameter values| `mu`. + + This method returns a |VectorArray| with an internal state + representation of the model's solution for given + |parameter values|. It is a convenience wrapper around + :meth:`compute`. + + The result may be :mod:`cached ` + in case caching has been activated for the given model. + + Parameters + ---------- + mu + |Parameter values| for which to solve. + return_error_estimate + If `True`, also return an error estimate for the computed solution. + kwargs + Additional keyword arguments passed to :meth:`compute` that + might affect how the solution is computed. + + Returns + ------- + The solution |VectorArray|. When `return_error_estimate` is `True`, + the estimate is returned as second value. + """ + data = self.compute( + solution=True, + solution_error_estimate=return_error_estimate, + mu=mu, + **kwargs + ) + if return_error_estimate: + return data['solution'], data['solution_error_estimate'] + else: + return data['solution'] + + def output(self, mu=None, return_error_estimate=False, **kwargs): + """Return the model output for given |parameter values| `mu`. + + This method is a convenience wrapper around :meth:`compute`. + + Parameters + ---------- + mu + |Parameter values| for which to compute the output. + return_error_estimate + If `True`, also return an error estimate for the computed output. + kwargs + Additional keyword arguments passed to :meth:`compute` that + might affect how the solution is computed. + + Returns + ------- + The computed model output as a |VectorArray| from `output_space`. + When `return_error_estimate` is `True`, the estimate is returned as + second value. 
""" - return self.solve(mu=mu, return_output=True, **kwargs)[1] + data = self.compute( + output=True, + output_error_estimate=return_error_estimate, + mu=mu, + **kwargs + ) + if return_error_estimate: + return data['output'], data['output_error_estimate'] + else: + return data['output'] - def estimate_error(self, U, mu=None): - """Estimate the model error for a given solution. + def estimate_error(self, mu=None, **kwargs): + """Estimate the error for the computed internal state. + + For given |parameter values| `mu` this method returns an + error estimate for the computed internal model state as returned + by :meth:`solve`. It is a convenience wrapper around + :meth:`compute`. The model error could be the error w.r.t. the analytical solution of the given problem or the model reduction error w.r.t. @@ -99,35 +361,67 @@ class Model(CacheableObject, ParametricObject): Parameters ---------- - U - The solution obtained by :meth:`~solve`. mu - |Parameter values| for which `U` has been obtained. + |Parameter values| for which to estimate the error. + kwargs + Additional keyword arguments passed to :meth:`compute` that + might affect how the error estimate (or the solution) is computed. Returns ------- The estimated error. """ - if getattr(self, 'error_estimator') is not None: - return self.error_estimator.estimate_error(U, mu=mu, m=self) - else: - raise NotImplementedError('Model has no error estimator.') + return self.compute( + solution_error_estimate=True, + mu=mu, + **kwargs + )['solution_error_estimate'] @Deprecated('estimate_error') def estimate(self, U, mu=None): - return self.estimate_error(U, mu) + return self.estimate_error(mu) + + def estimate_output_error(self, mu=None, **kwargs): + """Estimate the error for the computed output. + + For given |parameter values| `mu` this method returns an + error estimate for the computed model output as returned + by :meth:`output`. It is a convenience wrapper around + :meth:`compute`. 
+ + The output error could be the error w.r.t. the analytical + solution of the given problem or the model reduction error w.r.t. + a corresponding high-dimensional |Model|. + + Parameters + ---------- + mu + |Parameter values| for which to estimate the error. + kwargs + Additional keyword arguments passed to :meth:`compute` that + might affect how the error estimate (or the output) is computed. + + Returns + ------- + The estimated error. + """ + return self.compute( + output_error_estimate=True, + mu=mu, + **kwargs + )['output_error_estimate'] def visualize(self, U, **kwargs): - """Visualize a solution |VectorArray| U. + """Visualize a |VectorArray| U of the model's :attr:`solution_space`. Parameters ---------- U - The |VectorArray| from - :attr:`~pymor.models.interface.Model.solution_space` + The |VectorArray| from :attr:`solution_space` that shall be visualized. kwargs - See docstring of `self.visualizer.visualize`. + Additional keyword arguments to customize the visualization. + See the docstring of `self.visualizer.visualize`. 
""" if getattr(self, 'visualizer') is not None: return self.visualizer.visualize(U, self, **kwargs) diff --git a/src/pymor/models/iosys.py b/src/pymor/models/iosys.py index c4cb454eb1209e68cd2111a74d75b7f1afd24d8e..9b6ed7ae02b387411acce7ab2bdb0e4ce3f1a5af 100644 --- a/src/pymor/models/iosys.py +++ b/src/pymor/models/iosys.py @@ -45,9 +45,6 @@ class InputOutputModel(Model): def output_dim(self): return self.output_space.dim - def _solve(self, mu=None): - raise NotImplementedError - def eval_tf(self, s, mu=None): """Evaluate the transfer function.""" raise NotImplementedError diff --git a/src/pymor/models/mpi.py b/src/pymor/models/mpi.py index d49ef84350bf0800e221ec566f3a7b8462372b8d..7fb388fbaf7512c4a9e9e8c3e08dca230b7fc4d0 100644 --- a/src/pymor/models/mpi.py +++ b/src/pymor/models/mpi.py @@ -36,9 +36,9 @@ class MPIModel: self.parameters_internal = m.parameters_internal self.visualizer = MPIVisualizer(obj_id) - def _solve(self, mu=None): + def _compute_solution(self, mu=None, **kwargs): return self.solution_space.make_array( - mpi.call(mpi.method_call_manage, self.obj_id, 'solve', mu=mu) + mpi.call(mpi.method_call_manage, self.obj_id, '_compute_solution', mu=mu, **kwargs) ) def visualize(self, U, **kwargs): diff --git a/src/pymor/models/neural_network.py b/src/pymor/models/neural_network.py index 4c92a92063bf793e8dc7ab4610d85046eb6ef20a..dfec5f1f6e1da83af16d4c60a62cd00cd40ff6ab 100644 --- a/src/pymor/models/neural_network.py +++ b/src/pymor/models/neural_network.py @@ -62,9 +62,7 @@ if config.HAVE_TORCH: if output_functional is not None: self.output_space = output_functional.range - def _solve(self, mu=None, return_output=False): - if not self.logging_disabled: - self.logger.info(f'Solving {self.name} for {mu} ...') + def _compute_solution(self, mu=None, **kwargs): # convert the parameter `mu` into a form that is usable in PyTorch converted_input = torch.from_numpy(mu.to_numpy()).double() @@ -73,13 +71,7 @@ if config.HAVE_TORCH: # convert plain numpy array to 
element of the actual solution space U = self.solution_space.make_array(U) - if return_output: - if self.output_functional is None: - raise ValueError('Model has no output') - return U, self.output_functional.apply(U, mu=mu) - else: - return U - + return U class FullyConnectedNN(nn.Module, BasicObject): """Class for neural networks with fully connected layers. diff --git a/src/pymordemos/analyze_pickle.py b/src/pymordemos/analyze_pickle.py index b767135335fa03cb9a09c218820b6de07b1574ca..bd0392a8ce18a8f7c676bd28b6e05baa16cc823a 100755 --- a/src/pymordemos/analyze_pickle.py +++ b/src/pymordemos/analyze_pickle.py @@ -72,10 +72,10 @@ def analyze_pickle_histogram(args): if hasattr(rom, 'estimate'): ests = [] - for u, mu in zip(us, mus): + for mu in mus: print(f'Estimating error for {mu} ... ', end='') sys.stdout.flush() - ests.append(rom.estimate_error(u, mu=mu)) + ests.append(rom.estimate_error(mu)) print('done') if args['--detailed']: @@ -212,10 +212,10 @@ def analyze_pickle_convergence(args): if hasattr(rom, 'estimate'): ests = [] start = time.time() - for u, mu in zip(us, mus): + for mu in mus: # print('e', end='') # sys.stdout.flush() - ests.append(rom.estimate_error(u, mu=mu)) + ests.append(rom.estimate_error(mu)) ESTS.append(max(ests)) T_ESTS.append((time.time() - start) * 1000. / len(mus))