Commit 581d1740 authored by Tim Keil

refactor d_mu computations in Model and StationaryModel

parent dde4b3af
@@ -102,33 +102,27 @@ class StationaryModel(Model):
             self._dual = self.with_(operator=self.dual_operator, rhs=self.dual_rhs)
         return self._dual
 
-    def _compute_solution_d_mu(self, parameter, index, solution, mu):
-        residual_dmu_lhs = VectorOperator(self.operator.d_mu(parameter, index).apply(solution, mu=mu))
-        residual_dmu_rhs = self.rhs.d_mu(parameter, index)
-        rhs_operator = residual_dmu_rhs-residual_dmu_lhs
+    def _compute_solution_d_mu_single_direction(self, parameter, index, solution, mu):
+        lhs_d_mu = VectorOperator(self.operator.d_mu(parameter, index).apply(solution, mu=mu))
+        rhs_d_mu = self.rhs.d_mu(parameter, index)
+        rhs_operator = rhs_d_mu - lhs_d_mu
         return self.operator.apply_inverse(rhs_operator.as_range_array(mu), mu=mu)
 
     _compute_allowed_kwargs = frozenset({'adjoint_approach'})
 
     def _compute_output_d_mu(self, U, mu, adjoint_approach=True):
-        gradient = []
-        if adjoint_approach:
-            # Use dual solution for gradient
-            P = self.dual.solve(mu)
-        else:
-            # Use sensitivities for gradient
-            U_d_mus = self._compute_solution_d_mus(U, mu)
-        for (parameter, size) in self.parameters.items():
-            for index in range(size):
-                output_partial_dmu = self.output_functional.d_mu(parameter, index).apply(U, mu=mu).to_numpy()[0,0]
-                if adjoint_approach:
-                    residual_dmu_lhs = self.operator.d_mu(parameter, index).apply2(U, P, mu=mu)
-                    residual_dmu_rhs = self.rhs.d_mu(parameter, index).apply_adjoint(P, mu=mu).to_numpy()[0,0]
-                    gradient.append((output_partial_dmu + residual_dmu_rhs - residual_dmu_lhs)[0,0])
-                else:
-                    U_d_mu = U_d_mus[parameter][index]
-                    gradient.append(output_partial_dmu + \
-                        self.output_functional.apply(U_d_mu, mu).to_numpy()[0,0])
+        if adjoint_approach is False:
+            return super()._compute_output_d_mu(U, mu)
+        else:
+            # Use the adjoint approach for computing the gradient, see _[HPUU09]
+            gradient = []
+            P = self.dual.solve(mu)
+            for (parameter, size) in self.parameters.items():
+                for index in range(size):
+                    output_partial_dmu = self.output_functional.d_mu(parameter, index).apply(U, mu=mu).to_numpy()[0,0]
+                    lhs_d_mu = self.operator.d_mu(parameter, index).apply2(U, P, mu=mu)
+                    rhs_d_mu = self.rhs.d_mu(parameter, index).apply_adjoint(P, mu=mu).to_numpy()[0,0]
+                    gradient.append((output_partial_dmu + rhs_d_mu - lhs_d_mu)[0,0])
         return np.array(gradient)
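Both gradient strategies in this commit compute the same quantity: for an output J(u(mu), mu) with A(mu) u(mu) = f(mu), the adjoint approach kept in StationaryModel solves one dual problem and evaluates d_mu_i J = partial_mu_i J + p . (partial_mu_i f - partial_mu_i A u), while the sensitivity-based default that moves into Model solves one linearized problem per parameter component. The following self-contained NumPy sketch (toy data and invented names, not pyMOR API or the commit's code) illustrates both strategies and checks that they agree:

    import numpy as np

    rng = np.random.default_rng(0)
    n = 6
    # affinely parameter-dependent toy problem with mu in R^2 (all data made up)
    A0 = 4.0 * np.eye(n) + 0.1 * rng.standard_normal((n, n))
    A1 = 0.1 * rng.standard_normal((n, n))
    A2 = 0.1 * rng.standard_normal((n, n))
    f0, f1 = rng.standard_normal(n), rng.standard_normal(n)
    c0, c1 = rng.standard_normal(n), rng.standard_normal(n)

    def A(mu):        # system operator A(mu)
        return A0 + mu[0] * A1 + mu[1] * A2

    def dA(mu, i):    # partial derivative of A w.r.t. mu_i
        return (A1, A2)[i]

    def f(mu):        # right-hand side f(mu)
        return f0 + mu[1] * f1

    def df(mu, i):    # partial derivative of f w.r.t. mu_i
        return (np.zeros(n), f1)[i]

    def c(mu):        # output functional J(u, mu) = c(mu) @ u
        return c0 + mu[0] * c1

    def dc(mu, i):    # partial derivative of c w.r.t. mu_i
        return (c1, np.zeros(n))[i]

    mu = np.array([0.3, -0.7])
    u = np.linalg.solve(A(mu), f(mu))        # primal solve

    # adjoint approach: one extra (dual) solve, independent of the parameter dimension
    p = np.linalg.solve(A(mu).T, c(mu))      # dual solve
    grad_adjoint = np.array([dc(mu, i) @ u + p @ df(mu, i) - p @ (dA(mu, i) @ u)
                             for i in range(2)])

    # sensitivity approach: one extra solve per parameter component
    grad_sens = []
    for i in range(2):
        u_d_mu = np.linalg.solve(A(mu), df(mu, i) - dA(mu, i) @ u)  # solution sensitivity
        grad_sens.append(dc(mu, i) @ u + c(mu) @ u_d_mu)
    grad_sens = np.array(grad_sens)

    assert np.allclose(grad_adjoint, grad_sens)

The sign convention for the dual problem in this sketch is chosen for readability; pyMOR's dual model above builds its operator and right-hand side from self.dual_operator and self.dual_rhs.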
@@ -109,7 +109,7 @@ class Model(CacheableObject, ParametricObject):
         else:
             return self.output_functional.apply(solution, mu=mu).to_numpy()
 
-    def _compute_solution_d_mu(self, parameter, index, solution, mu=None, **kwargs):
+    def _compute_solution_d_mu_single_direction(self, parameter, index, solution, mu=None, **kwargs):
         """Compute the partial derivative of the solution w.r.t. a parameter index
 
         Parameters
@@ -127,7 +127,7 @@ class Model(CacheableObject, ParametricObject):
         """
         raise NotImplementedError
 
-    def _compute_solution_d_mus(self, solution, mu=None, **kwargs):
+    def _compute_solution_d_mu(self, solution, mu=None, **kwargs):
         """Compute all partial derivatives of the solution w.r.t. the parameter components
 
         Parameters
@@ -143,7 +143,8 @@ class Model(CacheableObject, ParametricObject):
         for (parameter, size) in sorted(self.parameters.items()):
             sens_for_param = self.solution_space.empty()
             for l in range(size):
-                sens_for_param.append(self._compute_solution_d_mu(parameter, l, solution, mu))
+                sens_for_param.append(self._compute_solution_d_mu_single_direction(
+                    parameter, l, solution, mu))
             sensitivities[parameter] = sens_for_param
         return sensitivities
@@ -155,7 +156,14 @@ class Model(CacheableObject, ParametricObject):
         mu
             |Parameter values| for which to compute the gradient
         """
-        raise NotImplementedError
+        gradient = []
+        U_d_mus = self._compute_solution_d_mu(solution, mu)
+        for (parameter, size) in self.parameters.items():
+            for index in range(size):
+                output_partial_dmu = self.output_functional.d_mu(parameter, index).apply(solution, mu=mu).to_numpy()[0,0]
+                U_d_mu = U_d_mus[parameter][index]
+                gradient.append(output_partial_dmu + self.output_functional.apply(U_d_mu, mu).to_numpy()[0,0])
+        return np.array(gradient)
 
     def _compute_solution_error_estimate(self, solution, mu=None, **kwargs):
         """Compute an error estimate for the computed internal state.
@@ -254,10 +262,11 @@ class Model(CacheableObject, ParametricObject):
         output
             If `True`, return the model output.
         solution_d_mu
-            If `True`, return the derivative of the model's internal state w.r.t. a
-            parameter component.
+            If not `False`, either `True` to return the derivatives of the model's
+            internal state w.r.t. all parameter components, or a tuple `(parameter, index)`
+            to return the derivative w.r.t. a single parameter component.
         output_d_mu
-            If `True`, return the derivative of the model output w.r.t. a parameter component.
+            If `True`, return the gradient of the model output w.r.t. the |Parameter|.
         solution_error_estimate
             If `True`, return an error estimate for the computed internal state.
         output_error_estimate
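The extended compute() interface documented above can then be exercised roughly as follows. This is a hedged usage sketch: the model fom, the parameter name 'diffusion', and the parameter values mu are assumed for illustration and do not come from the commit; only keyword arguments and dictionary keys that appear in the diff are used.

    # request all solution sensitivities and the output gradient in one call
    data = fom.compute(solution_d_mu=True, output_d_mu=True, mu=mu)
    sensitivities = data['solution_d_mu']   # derivatives of the internal state
    gradient = data['output_d_mu']          # gradient of the output w.r.t. all parameter components

    # request a single sensitivity direction instead of all of them
    data = fom.compute(solution_d_mu=('diffusion', 0), mu=mu)
    single_sensitivity = data['solution_d_mu']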
@@ -313,12 +322,16 @@ class Model(CacheableObject, ParametricObject):
             data['output'] = retval
 
         if solution_d_mu and 'solution_d_mu' not in data:
-            retval = self._compute_solution_d_mus(data['solution'], mu=mu, **kwargs)
+            if isinstance(solution_d_mu, tuple):
+                retval = self._compute_solution_d_mu_single_direction(
+                    solution_d_mu[0], solution_d_mu[1], data['solution'], mu=mu, **kwargs)
+            else:
+                retval = self._compute_solution_d_mu(data['solution'], mu=mu, **kwargs)
             if isinstance(retval, dict):
                 assert 'solution_d_mu' in retval
                 data.update(retval)
             else:
-                data['solution'] = retval
+                data['solution_d_mu'] = retval
 
         if output_d_mu and 'output_d_mu' not in data:
             # TODO use caching here (requires skipping args in key generation)