FlatTop PoU with overlap = 1 coincides with Q1, as it should

parent 1eca48d1
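
The claim in the commit title can be illustrated in one dimension: once the flat part of a flat-top partition-of-unity function shrinks to a point, what is left is the standard piecewise linear hat function, i.e. the Q1 partition of unity on a tensor grid. The sketch below is only an illustration under its own parameterization (ramp width w, plateau half-width (H - w)/2); it does not use dune-gdt's ContinuousFlatTopPartitionOfUnity, and the meaning of its `overlap` argument is treated as an assumption here.

import numpy as np


def flat_top(x, xi, H, w):
    # flat-top bump around node xi: plateau of half-width a = (H - w)/2,
    # linear ramps of width w, zero for |x - xi| > (H + w)/2
    a, b = (H - w) / 2., (H + w) / 2.
    return np.clip((b - np.abs(x - xi)) / w, 0., 1.)


def q1_hat(x, xi, H):
    # standard P1/Q1 nodal hat function of support width 2*H
    return np.maximum(0., 1. - np.abs(x - xi) / H)


H = 0.25                                 # macro mesh width
nodes = np.arange(0., 1. + H / 2., H)    # macro nodes in [0, 1]
x = np.linspace(0., 1., 1001)

# the functions form a partition of unity for any ramp width 0 < w <= H ...
for w in (0.3 * H, 0.7 * H, H):
    pou = np.array([flat_top(x, xi, H, w) for xi in nodes])
    assert np.allclose(pou.sum(axis=0), 1.)

# ... and for w == H the plateau vanishes and each function is exactly the Q1 hat
for xi in nodes:
    assert np.allclose(flat_top(x, xi, H, w=H), q1_hat(x, xi, H))
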
@@ -2,3 +2,4 @@
 dask-worker-space
 worker-space
 DATA
+*.pyc

import yaml
yaml.warnings({'YAMLLoadWarning': False})

import os
import numpy as np

from simdb.run import new_dataset, append_values

from pymor.core.logger import set_log_levels, getLogger
from pymor.operators.block import BlockOperator

from dune.gdt.usercode import ContinuousFlatTopPartitionOfUnity
from thermalblock_problem import domain, init_problem
from dune.gdt.usercode import DomainDecomposition
from distributed_elliptic_discretizer import discretize_elliptic_block_swipdg
from distributed_elliptic_reductor import IPLRBFlatTopLocalizedCoerciveReductor as FtReductor
from test_grid_dependecy_of_Q1_localized_estimate import (
    expected_results_fixed_H_refined_h,
    expected_results_fixed_h_refined_H,
    run_fixed_H_refined_h,
    run_fixed_h_refined_H,
)

FILENAME = os.path.basename(__file__)[:-3]

set_log_levels({
    FILENAME: 'INFO',
    'distributed_elliptic_discretizer': 'INFO',
    'pymor.discretizations': 'WARN',
})

make_problem = lambda: init_problem((1, 1))

base_config = {
    'local_space_type': 'cg_p1',
    'domain': domain,
    'problem': make_problem()['id'],
    'mu': 1,
}


def compute_on_single_refinemet(cfg):
    logger = getLogger(f'{FILENAME}.compute_on_single_refinemet')

    def init_dd():
        return DomainDecomposition(cfg['num_macro_elements'],
                                   cfg['num_refinements_per_subdomain'],
                                   domain[0],
                                   domain[1])

    dd = init_dd()
    p = make_problem()
    num_subdomains = dd.num_subdomains
    fom = discretize_elliptic_block_swipdg(make_problem, init_dd, cfg['local_space_type'])
    fom.globalize()  # We need this later on, only here to trigger the output
    num_DoFs = fom.solution_space.dim
    append_values(num_DoFs=num_DoFs)

    logger.info('reducing with initial basis (PoU) ...')
    reductor = FtReductor(fom, init_dd, cfg, kappa_lower=make_problem()['kappa_lower'], overlap=cfg['overlap'])
    append_values(reductor=str(reductor))
    append_values(basis_size=[len(b) for b in reductor.bases])
    rom = reductor.reduce()

    logger.info('building error product ...')
    product = np.full((dd.num_subdomains, dd.num_subdomains), None)
    for (ii, jj), local_prod in fom.products['broken_h1'].items():
        product[ii, jj] = local_prod
    product = BlockOperator(product)

    logger.info('estimating ...')
    mu = rom.parse_parameter(cfg['mu'])
    u = rom.solve(mu=mu)
    estimate = rom.estimate(u, mu=mu)[0]
    logger.info('... done (is {})'.format(estimate))
    append_values(estimate=estimate)

    logger.info('computing error ...')
    diff = fom.solve(mu) - reductor.reconstruct(u)
    error = np.sqrt(product.apply2(diff, diff))[0][0]
    logger.info('... done (is {})'.format(error))
    append_values(error=error)

    return num_subdomains, num_DoFs, error, estimate


def run_fixed_H_refined_h(overlap, num_refs=(1, 2, 3, 4, 5, 6, 7, 8)):
    config = dict(base_config)
    config['num_macro_elements'] = [2, 2]
    config['num_refs'] = num_refs
    config['overlap'] = overlap
    new_dataset(FILENAME + '.fixed_H_refined_h', **config)
    results = {'num_subdomains': [], 'num_DoFs': [], 'error': [], 'estimate': []}
    for num_refs in config['num_refs']:
        cfg = dict(config)
        cfg['num_refinements_per_subdomain'] = num_refs
        nsd, nD, er, es = compute_on_single_refinemet(cfg)
        results['num_subdomains'].append(nsd)
        results['num_DoFs'].append(nD)
        results['error'].append(er)
        results['estimate'].append(es)
        print('')
    return results


def run_fixed_h_refined_H(overlap, num_H_refs=3):  # the fourth already takes 15 minutes and all ram on my notebook
    config = dict(base_config)
    macro_grid = [4, 4]
    num_refs = 7
    config['macro_grid'] = macro_grid
    config['num_refs'] = num_refs
    config['overlap'] = overlap
    new_dataset(FILENAME + '.fixed_h_refined_H', **config)
    results = {'num_subdomains': [], 'num_DoFs': [], 'error': [], 'estimate': []}
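    # each pass doubles the macro elements per direction and drops one local refinement, so
    # (assuming each refinement halves the local mesh width) h stays fixed while H shrinks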
    for _ in np.arange(num_H_refs):
        cfg = dict(config)
        cfg['num_macro_elements'] = macro_grid
        cfg['num_refinements_per_subdomain'] = num_refs
        nsd, nD, er, es = compute_on_single_refinemet(cfg)
        results['num_subdomains'].append(nsd)
        results['num_DoFs'].append(nD)
        results['error'].append(er)
        results['estimate'].append(es)
        print('')
        num_refs -= 1
        macro_grid[0] *= 2
        macro_grid[1] *= 2
    return results


make_pou = lambda dd: ContinuousFlatTopPartitionOfUnity(dd, overlap=1)
pou_id = 'FtPoU1Overlap'
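# a flat-top PoU with overlap=1 should coincide with the Q1 partition of unity (see commit
# message), so the tests below compare against the reference results of the Q1 variant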


def test_fixed_H_refined_h_same_as_Q1():
    results = run_fixed_H_refined_h(overlap=1, num_refs=(1, 2))
    from test_grid_dependecy_of_Q1_localized_estimate import expected_results_fixed_H_refined_h
    results = run_fixed_H_refined_h((1, 2), make_pou, pou_id)
    expected_results = expected_results_fixed_H_refined_h()
    for kk, vv in expected_results.items():
        assert np.allclose(results[kk], vv)


def test_fixed_h_refined_H_same_as_Q1():
    results = run_fixed_h_refined_H(overlap=1, num_H_refs=2)
    from test_grid_dependecy_of_Q1_localized_estimate import expected_results_fixed_h_refined_H
    results = run_fixed_h_refined_H(2, make_pou, pou_id)
    expected_results = expected_results_fixed_h_refined_H()
    for kk, vv in expected_results.items():
        assert np.allclose(results[kk], vv)


if __name__ == '__main__':
    r = run_fixed_H_refined_h(overlap=1, num_refs=(1, 2))
    run_fixed_H_refined_h((1, 2), make_pou, pou_id)
    print('')
    print('')
    r = run_fixed_h_refined_H(overlap=1, num_H_refs=2)
    run_fixed_h_refined_H(2, make_pou, pou_id)

@@ -31,7 +31,7 @@ base_config = {
 }


-def compute_on_single_refinemet(cfg):
+def compute_on_single_refinemet(cfg, make_pou, pou_id):
     logger = getLogger(f'{FILENAME}.compute_on_single_refinemet')
     def init_dd():
         return DomainDecomposition(cfg['num_macro_elements'],
@@ -47,8 +47,7 @@ def compute_on_single_refinemet(cfg):
     append_values(num_DoFs=num_DoFs)
     logger.info('reducing with initial basis (PoU) ...')
     reductor = Reductor(
-        fom, init_dd, cfg, make_pou=lambda dd: ContinuousLagrangePartitionOfUnity(dd),
-        kappa_lower=init_problem()['kappa_lower'], pou_id='Q1Pou')
+        fom, init_dd, cfg, make_pou=make_pou, kappa_lower=init_problem()['kappa_lower'], pou_id=pou_id)
     append_values(reductor=str(reductor))
     append_values(basis_size=[len(b) for b in reductor.bases])
     rom = reductor.reduce()
@@ -71,7 +70,10 @@ def compute_on_single_refinemet(cfg):
     return num_subdomains, num_DoFs, error, estimate


-def run_fixed_H_refined_h(num_refs=(1, 2, 3, 4, 5, 6, 7, 8)):
+def run_fixed_H_refined_h(
+        num_refs=(1, 2, 3, 4, 5, 6, 7, 8),
+        make_pou=lambda dd: ContinuousLagrangePartitionOfUnity(dd),
+        pou_id='Q1PoU'):
     config = dict(base_config)
     config['num_macro_elements'] = [2, 2]
     config['num_refs'] = num_refs
@@ -81,7 +83,7 @@ def run_fixed_H_refined_h(num_refs=(1, 2, 3, 4, 5, 6, 7, 8)):
     for num_refs in config['num_refs']:
         cfg = dict(config)
         cfg['num_refinements_per_subdomain'] = num_refs
-        nsd, nD, er, es = compute_on_single_refinemet(cfg)
+        nsd, nD, er, es = compute_on_single_refinemet(cfg, make_pou, pou_id)
         results['num_subdomains'].append(nsd)
         results['num_DoFs'].append(nD)
         results['error'].append(er)
@@ -91,7 +93,10 @@ def run_fixed_H_refined_h(num_refs=(1, 2, 3, 4, 5, 6, 7, 8)):
     return results


-def run_fixed_h_refined_H(num_H_refs=3): # the fourth already takes 15 minutes and all ram on my notebook
+def run_fixed_h_refined_H(
+        num_H_refs=3, # the fourth already takes 15 minutes and all ram on my notebook
+        make_pou=lambda dd: ContinuousLagrangePartitionOfUnity(dd),
+        pou_id='Q1PoU'):
     config = dict(base_config)
     macro_grid = [4, 4]
     num_refs = 7
@@ -104,7 +109,7 @@ def run_fixed_h_refined_H(num_H_refs=3): # the fourth already takes 15 minutes a
         cfg = dict(config)
         cfg['num_macro_elements'] = macro_grid
         cfg['num_refinements_per_subdomain'] = num_refs
-        nsd, nD, er, es = compute_on_single_refinemet(cfg)
+        nsd, nD, er, es = compute_on_single_refinemet(cfg, make_pou, pou_id)
         results['num_subdomains'].append(nsd)
         results['num_DoFs'].append(nD)
         results['error'].append(er)