Commit 8873740b authored by Tim Keil

[models] some change for solve_d_mu

parent a6606c43
@@ -339,8 +339,8 @@ class Model(CacheableObject, ParametricObject):
                     solution_d_mu[0], solution_d_mu[1], data['solution'], mu=mu, **kwargs)
             else:
                 retval = self._compute_solution_d_mu(data['solution'], mu=mu, **kwargs)
-            if isinstance(retval, dict):
-                assert 'solution_d_mu' in retval
+            # retval is always a dict
+            if isinstance(retval, dict) and 'solution_d_mu' in retval:
                 data.update(retval)
             else:
                 data['solution_d_mu'] = retval
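This hunk relaxes the handling of the `_compute_solution_d_mu` return value: instead of asserting that a dict return contains the 'solution_d_mu' key, any other return value (typically the sensitivity |VectorArray| itself) is now stored under that key directly. A minimal sketch of the resulting contract, assuming pyMOR's `Model` base class (import path as in recent pyMOR versions); `MyModel` and `_assemble_sensitivity` are hypothetical names for illustration, not part of the commit:

    from pymor.models.interface import Model

    class MyModel(Model):
        def _compute_solution_d_mu(self, solution, mu=None, **kwargs):
            # hypothetical helper computing the sensitivity |VectorArray|
            sens = self._assemble_sensitivity(solution, mu)
            # variant A: return a dict; Model._compute merges it via
            # data.update(retval) because 'solution_d_mu' is present
            return {'solution_d_mu': sens}
            # variant B (alternative): return the |VectorArray| itself;
            # Model._compute then stores it as data['solution_d_mu']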
@@ -463,11 +463,11 @@ class Model(CacheableObject, ParametricObject):
             The sensitivity of the solution as a |VectorArray|.
         """
         data = self.compute(
-            solution_d_mu=True,
+            solution_d_mu=(parameter, index),
             mu=mu,
             **kwargs
         )
-        return data['solution_d_mu'][parameter][index]
+        return data['solution_d_mu']

     def output_d_mu(self, mu=None, **kwargs):
         """compute the gradient w.r.t. the parameter of the output functional
......
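With this change, `solve_d_mu` requests exactly one sensitivity from `compute` by passing the `(parameter, index)` tuple instead of `True`, and returns the resulting |VectorArray| directly rather than indexing into a dict of all sensitivities. A hedged usage sketch (the model `m`, the parameter name 'diffusion', and the parameter values `mu` are assumptions for illustration, not from the commit):

    import numpy as np

    # old behaviour: all sensitivities were computed and solve_d_mu
    # indexed the nested result itself:
    #   data['solution_d_mu'][parameter][index]
    # new behaviour: only the requested sensitivity is computed
    u_d_mu = m.solve_d_mu('diffusion', 1, mu=mu)

    # requesting all sensitivities explicitly still returns the
    # nested structure, so both access paths should agree:
    all_d_mu = m.compute(solution_d_mu=True, mu=mu)['solution_d_mu']
    assert np.allclose(u_d_mu.to_numpy(), all_d_mu['diffusion'][1].to_numpy())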
@@ -59,11 +59,14 @@ def main(
     RB_greedy_data = rb_greedy(fom, RB_reductor, training_set, atol=1e-2)
     rom = RB_greedy_data['rom']

-    #verifying that the adjoint and sensitivity gradients are the samea
+    #verifying that the adjoint and sensitivity gradients are the same and that solve_d_mu also works
     for mu in training_set:
         gradient_with_adjoint_approach = rom.output_d_mu(mu, use_adjoint=True)
         gradient_with_sensitivities = rom.output_d_mu(mu, use_adjoint=False)
         assert np.allclose(gradient_with_adjoint_approach, gradient_with_sensitivities)
+        u_d_mu = rom.solve_d_mu('diffusion', 1, mu=mu).to_numpy()
+        u_d_mu_ = rom.compute(solution_d_mu=True, mu=mu)['solution_d_mu']['diffusion'][1].to_numpy()
+        assert np.allclose(u_d_mu, u_d_mu_)

     def rom_objective_functional(mu):
         return rom.output(mu)
......