Commit 3423bc39 authored by pyMOR Bot

{"repo_url": "https://github.com/pymor/pymor.git", "pr": 1113, "base":...

{"repo_url": "https://github.com/pymor/pymor.git", "pr": 1113, "base": "master", "head_ref": "compute", "pr_branch": "github/PR_1113", "commit_sha": "8169ec47"}
parents 45f02488 8169ec47
Pipeline #65770 passed with stages in 70 minutes and 20 seconds
@@ -78,7 +78,7 @@ Solving the Model
 Now that we have our FOM and a reduced space :math:`V_N` spanned by `basis`, we can project
 the |Model|. However, before doing so, we need to understand how actually
 solving the FOM works. Let's take a look at what
-:meth:`~pymor.models.interface.Model.solve` actually does:
+:meth:`~pymor.models.interface.Model.solve` does:

 .. jupyter-execute::
@@ -86,19 +86,26 @@ solving the FOM works. Let's take a look at what

     print_source(fom.solve)

 This does not look too interesting. Actually, :meth:`~pymor.models.interface.Model.solve`
-is just a thin wrapper around the `_solve` method, which performs the actual
-computations. All that :meth:`~pymor.models.interface.Model.solve` does is
-checking the input |parameter values| `mu` and :mod:`caching <pymor.core.cache>`
-the results. So let's take a look at `_solve`:
+is just a convenience method around :meth:`~pymor.models.interface.Model.compute`, which
+handles the actual computation of the solution and various other associated values like
+outputs or error estimates. Next, we take a look at the implementation of
+:meth:`~pymor.models.interface.Model.compute`:

 .. jupyter-execute::

-    print_source(fom._solve)
+    print_source(fom.compute)

-There is some code related to logging and the computation of an output functional.
-The interesting line is::
+What we see is a default implementation from :class:`~pymor.models.interface.Model` that
+takes care of checking the input |parameter values| `mu`, :mod:`caching <pymor.core.cache>` and
+:mod:`logging <pymor.core.logger>`, but defers the actual computations to further private methods.
+Implementors can directly implement :meth:`~pymor.models.interface.Model._compute` to compute
+multiple return values at once in an optimized way. Our given model, however, just implements
+:meth:`~pymor.models.interface.Model._compute_solution`, where we can find the
+actual code:

-    U = self.operator.apply_inverse(self.rhs.as_range_array(mu), mu=mu)
+.. jupyter-execute::
+
+    print_source(fom._compute_solution)

 What does this mean? If we look at the type of `fom`,
......
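Reviewer's note: the two documentation hunks above describe how `solve` now relates to the new `compute` method. Below is a minimal sketch of that relationship on a tiny hand-made model; the 3x3 system and the names `op`, `rhs` and `fom` are invented for illustration, while the `solve`/`compute` calls and the `'solution'` key of the returned dict are taken from this PR.

```python
import numpy as np

from pymor.models.basic import StationaryModel
from pymor.operators.numpy import NumpyMatrixOperator

# a tiny non-parametric full-order model: solve A u = f with A = diag(1, 2, 4) and f = (1, 1, 1)^T
op = NumpyMatrixOperator(np.diag([1., 2., 4.]))
rhs = NumpyMatrixOperator(np.ones((3, 1)))
fom = StationaryModel(op, rhs)

# the familiar entry point ...
U = fom.solve()

# ... is now a thin convenience wrapper: asking compute() for the solution returns a dict
result = fom.compute(solution=True)
assert np.allclose(U.to_numpy(), result['solution'].to_numpy())
```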
@@ -333,9 +333,10 @@ def _compute_errors(mu, fom, reductor, error_estimator, error_norms, condition,
     for i_N, N in enumerate(basis_sizes):
         rom = reductor.reduce(dims={k: N for k in reductor.bases})
-        u = rom.solve(mu)
+        result = rom.compute(solution=True, solution_error_estimate=error_estimator, mu=mu)
+        u = result['solution']
         if error_estimator:
-            e = rom.estimate_error(u, mu)
+            e = result['solution_error_estimate']
             e = e[0] if hasattr(e, '__len__') else e
             error_estimates[i_N] = e
         if fom and reductor:
......
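The hunk above replaces separate `solve`/`estimate_error` calls by a single `compute` call that returns both values. A hedged sketch of that pattern; `rom` (a reduced model with an error estimator attached) and `mu` are assumed to exist, as in the surrounding `_compute_errors` helper, and are not defined here.

```python
# assumed: a reduced-order model `rom` equipped with an error estimator, and parameter values `mu`
result = rom.compute(solution=True, solution_error_estimate=True, mu=mu)

u = result['solution']                    # reduced solution
e = result['solution_error_estimate']     # estimated reduction error for that solution

# as in the diff above, the estimate may come back as a length-1 array
e = e[0] if hasattr(e, '__len__') else e
```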
@@ -268,7 +268,7 @@ def _rb_surrogate_evaluate(rom=None, fom=None, reductor=None, mus=None, error_no
         return -1., None

     if fom is None:
-        errors = [rom.estimate_error(rom.solve(mu), mu) for mu in mus]
+        errors = [rom.estimate_error(mu) for mu in mus]
     elif error_norm is not None:
         errors = [error_norm(fom.solve(mu) - reductor.reconstruct(rom.solve(mu))) for mu in mus]
     else:
......
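This hunk reflects the changed signature of `estimate_error`: the reduced solution is no longer passed in by the caller but obtained internally by the model. A small before/after sketch, again assuming `rom` and `mus` as in the surrounding function:

```python
# old interface: the caller solved first and handed the solution to the estimator
# errors = [rom.estimate_error(rom.solve(mu), mu) for mu in mus]

# new interface: only the parameter values are passed; the solution is computed internally
errors = [rom.estimate_error(mu) for mu in mus]
```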
@@ -84,18 +84,8 @@ class StationaryModel(Model):
             f' output_space: {self.output_space}\n'
         )

-    def _solve(self, mu=None, return_output=False):
-        # explicitly checking if logging is disabled saves the str(mu) call
-        if not self.logging_disabled:
-            self.logger.info(f'Solving {self.name} for {mu} ...')
-
-        U = self.operator.apply_inverse(self.rhs.as_range_array(mu), mu=mu)
-        if return_output:
-            if self.output_functional is None:
-                raise ValueError('Model has no output')
-            return U, self.output_functional.apply(U, mu=mu)
-        else:
-            return U
+    def _compute_solution(self, mu=None, **kwargs):
+        return self.operator.apply_inverse(self.rhs.as_range_array(mu), mu=mu)


 class InstationaryModel(Model):
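With the `return_output` branching gone, `StationaryModel._compute_solution` is reduced to a single `apply_inverse` call. The following sketch checks this equivalence on an invented 3x3 model (the operator `A`, right-hand side `f` and model `m` are made up for illustration); the public `solve` path additionally goes through `Model.compute`, which adds `mu` checking, caching and logging.

```python
import numpy as np

from pymor.models.basic import StationaryModel
from pymor.operators.numpy import NumpyMatrixOperator

A = NumpyMatrixOperator(np.diag([1., 2., 4.]))
f = NumpyMatrixOperator(np.ones((3, 1)))
m = StationaryModel(A, f)

# what the slimmed-down _compute_solution boils down to for this model ...
U_direct = A.apply_inverse(f.as_range_array())

# ... and the result obtained through the public interface
U_solve = m.solve()
assert np.allclose(U_direct.to_numpy(), U_solve.to_numpy())
```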
@@ -195,21 +185,12 @@ class InstationaryModel(Model):
     def with_time_stepper(self, **kwargs):
         return self.with_(time_stepper=self.time_stepper.with_(**kwargs))

-    def _solve(self, mu=None, return_output=False):
-        # explicitly checking if logging is disabled saves the expensive str(mu) call
-        if not self.logging_disabled:
-            self.logger.info(f'Solving {self.name} for {mu} ...')
-
+    def _compute_solution(self, mu=None, **kwargs):
         mu = mu.with_(t=0.)
         U0 = self.initial_data.as_range_array(mu)
         U = self.time_stepper.solve(operator=self.operator, rhs=self.rhs, initial_data=U0, mass=self.mass,
                                     initial_time=0, end_time=self.T, mu=mu, num_values=self.num_values)
-        if return_output:
-            if self.output_functional is None:
-                raise ValueError('Model has no output')
-            return U, self.output_functional.apply(U, mu=mu)
-        else:
-            return U
+        return U

     def to_lti(self):
         """Convert model to |LTIModel|.
......
This diff is collapsed.
@@ -45,9 +45,6 @@ class InputOutputModel(Model):
     def output_dim(self):
         return self.output_space.dim

-    def _solve(self, mu=None):
-        raise NotImplementedError
-
     def eval_tf(self, s, mu=None):
         """Evaluate the transfer function."""
         raise NotImplementedError
......
@@ -36,9 +36,9 @@ class MPIModel:
         self.parameters_internal = m.parameters_internal
         self.visualizer = MPIVisualizer(obj_id)

-    def _solve(self, mu=None):
+    def _compute_solution(self, mu=None, **kwargs):
         return self.solution_space.make_array(
-            mpi.call(mpi.method_call_manage, self.obj_id, 'solve', mu=mu)
+            mpi.call(mpi.method_call_manage, self.obj_id, '_compute_solution', mu=mu, **kwargs)
         )

     def visualize(self, U, **kwargs):
......
@@ -62,9 +62,7 @@ if config.HAVE_TORCH:
             if output_functional is not None:
                 self.output_space = output_functional.range

-        def _solve(self, mu=None, return_output=False):
-            if not self.logging_disabled:
-                self.logger.info(f'Solving {self.name} for {mu} ...')
-
+        def _compute_solution(self, mu=None, **kwargs):
             # convert the parameter `mu` into a form that is usable in PyTorch
             converted_input = torch.from_numpy(mu.to_numpy()).double()
@@ -73,13 +71,7 @@ if config.HAVE_TORCH:
             # convert plain numpy array to element of the actual solution space
             U = self.solution_space.make_array(U)

-            if return_output:
-                if self.output_functional is None:
-                    raise ValueError('Model has no output')
-                return U, self.output_functional.apply(U, mu=mu)
-            else:
-                return U
+            return U


     class FullyConnectedNN(nn.Module, BasicObject):
         """Class for neural networks with fully connected layers.
......
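Apart from the interface rename, the neural-network model's `_compute_solution` keeps the same core step: converting the parameter values into a double-precision PyTorch tensor. A standalone illustration of that conversion; `mu_values` is a stand-in for `mu.to_numpy()`:

```python
import numpy as np
import torch

mu_values = np.array([0.5, 2.0])                         # stand-in for mu.to_numpy()
converted_input = torch.from_numpy(mu_values).double()   # same conversion as in _compute_solution
print(converted_input.dtype)                             # torch.float64
```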
@@ -72,10 +72,10 @@ def analyze_pickle_histogram(args):
     if hasattr(rom, 'estimate'):
         ests = []
-        for u, mu in zip(us, mus):
+        for mu in mus:
             print(f'Estimating error for {mu} ... ', end='')
             sys.stdout.flush()
-            ests.append(rom.estimate_error(u, mu=mu))
+            ests.append(rom.estimate_error(mu))
             print('done')

     if args['--detailed']:
@@ -212,10 +212,10 @@ def analyze_pickle_convergence(args):
         if hasattr(rom, 'estimate'):
             ests = []
             start = time.time()
-            for u, mu in zip(us, mus):
+            for mu in mus:
                 # print('e', end='')
                 # sys.stdout.flush()
-                ests.append(rom.estimate_error(u, mu=mu))
+                ests.append(rom.estimate_error(mu))
             ESTS.append(max(ests))
             T_ESTS.append((time.time() - start) * 1000. / len(mus))
......