 
Commits (2)
Subproject commit fb2c0668d7c85faaa868ebecdcbf53f0d3743e20
Subproject commit a08102fed49dae633a61d7f4a0c76370c42037eb
......@@ -10,7 +10,8 @@ from hapod.hapod import local_pod, HapodParameters, binary_tree_hapod_over_ranks
from hapod.mpi import MPIWrapper
def boltzmann_binary_tree_hapod(grid_size,
def boltzmann_binary_tree_hapod(dimension,
grid_size,
chunk_size,
tol,
eval_tol=None,
......@@ -28,7 +29,7 @@ def boltzmann_binary_tree_hapod(grid_size,
# get boltzmann solver to create snapshots
mu = create_and_scatter_boltzmann_parameters(mpi.comm_world)
solver = create_boltzmann_solver(grid_size, mu, linear=linear)
solver = create_boltzmann_solver(dimension, grid_size, mu, linear=linear)
num_chunks, num_timesteps = solver_statistics(solver, chunk_size)
dt = solver.dt
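# solver_statistics is used here and in the other scripts to split the trajectory into
# HAPOD chunks. Its body is not part of this diff, so the following is only a plausible
# sketch reconstructed from its signature and from the solver attributes used above
# (solver.t_end and solver.dt are assumed to describe the time grid):
from math import ceil

def solver_statistics_sketch(solver, chunk_size, with_half_steps=True):
    # full time steps of the trajectory, including the initial value
    num_time_steps = ceil(solver.t_end / solver.dt) + 1
    if with_half_steps:
        # the splitting scheme also stores an intermediate (half) step per full step
        num_time_steps = 2 * num_time_steps - 1
    # each HAPOD leaf (one chunk) collects at most chunk_size snapshot vectors
    num_chunks = ceil(num_time_steps / chunk_size)
    return num_chunks, num_time_steps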
......@@ -198,13 +199,14 @@ if __name__ == "__main__":
chunk_size = 6 if argc < 3 else int(sys.argv[2])
tol = 1e-3 if argc < 4 else float(sys.argv[3])
omega = 0.95 if argc < 5 else float(sys.argv[4])
inc_gramian = True if argc < 6 else not (sys.argv[5] == "False" or sys.argv[5] == "0")
dimension = 2 if argc < 6 else int(sys.argv[5])
inc_gramian = True if argc < 7 else not (sys.argv[6] == "False" or sys.argv[6] == "0")
filename = "HAPOD_binary_tree_gridsize_%d_chunksize_%d_tol_%f_omega_%f.log" % (grid_size, chunk_size, tol, omega)
logfile = open(filename, "a")
final_modes, _, _, _, total_num_snapshots, _, mu, mpi, _, _, _, _, _ = boltzmann_binary_tree_hapod(
final_modes, _, _, _, total_num_snapshots, _, mu, mpi, _, _, _, _, _ = boltzmann_binary_tree_hapod(dimension,
grid_size, chunk_size, tol * grid_size, None, omega=omega, logfile=logfile, incremental_gramian=inc_gramian)
final_modes, win = mpi.shared_memory_bcast_modes(final_modes)
calculate_error(final_modes, grid_size, mu, total_num_snapshots, mpi, logfile=logfile)
calculate_error(final_modes, dimension, grid_size, mu, total_num_snapshots, mpi, logfile=logfile)
win.Free()
logfile.close()
mpi.comm_world.Barrier()
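# Every __main__ block in this merge request uses the same positional-argument idiom,
# and inserting the new dimension argument shifts all later sys.argv indices by one.
# A minimal sketch of that idiom as a helper (hypothetical, not part of the repository;
# defaults taken from the scripts in this diff):
import sys

def arg_or_default(position, default, cast):
    # position is the 1-based index into sys.argv, matching the pattern above
    return cast(sys.argv[position]) if len(sys.argv) > position else default

grid_size = arg_or_default(1, 20, int)
chunk_size = arg_or_default(2, 6, int)
tol = arg_or_default(3, 1e-3, float)
omega = arg_or_default(4, 0.95, float)
dimension = arg_or_default(5, 2, int)
inc_gramian = arg_or_default(6, True, lambda s: s not in ("False", "0"))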
......
......@@ -20,6 +20,7 @@ from hapod.boltzmann.utility import solver_statistics, create_boltzmann_solver
def calculate_l2_error_for_random_samples(basis,
mpi,
solver,
dimension,
grid_size,
chunk_size,
seed=MPI.COMM_WORLD.Get_rank(),
......@@ -51,20 +52,20 @@ def calculate_l2_error_for_random_samples(basis,
mu = [random.uniform(0., 8.), random.uniform(0., 8.), 0., random.uniform(0., 8.)]
# solve without saving solution to measure time
fom = DuneModel(nt, solver.dt, '', 0, grid_size, False, True, *mu, linear)
fom = DuneModel(nt, solver.dt, dimension, '', 0, grid_size, False, True, *mu, linear)
parsed_mu = fom.parse_parameter(mu)
start = timer()
fom.solve(parsed_mu, return_half_steps=False)
elapsed_high_dim += timer() - start
# now create Model that saves time steps to calculate error
fom = DuneModel(nt, solver.dt, '', 2000000, grid_size, False, False, *mu, linear)
fom = DuneModel(nt, solver.dt, dimension, '', 2000000, grid_size, False, False, *mu, linear)
assert hyper_reduction in ('none', 'projection', 'deim')
if hyper_reduction == 'projection':
lf = Concatenation([VectorArrayOperator(deim_cb), VectorArrayOperator(deim_cb, adjoint=True), fom.lf])
elif hyper_reduction == 'deim':
lf = EmpiricalInterpolatedOperator(fom.lf, deim_dofs, deim_cb, False)
fom = fom.with_(new_type=BoltzmannModelBase, lf=lf)
fom = fom.with_(new_type=BoltzmannModel, lf=lf)
print("Starting reduction ", timer() - start)
start = timer()
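# A short sketch of the two hyper-reduction branches above (operator classes and the
# deim call mirror this diff; fom, eval_basis and the deim quantities come from the
# surrounding script): 'projection' replaces the output of the kinetic operator fom.lf
# by its projection onto span(deim_cb), while 'deim' empirically interpolates fom.lf at
# the interpolation DOFs selected by pymor's DEIM algorithm.
from pymor.algorithms.ei import deim
from pymor.operators.constructions import Concatenation, VectorArrayOperator
from pymor.operators.ei import EmpiricalInterpolatedOperator

def hyper_reduced_lf(fom, eval_basis, variant='deim'):
    # interpolation DOFs and collateral basis from the gathered operator evaluations
    deim_dofs, deim_cb, _ = deim(eval_basis, len(eval_basis))
    if variant == 'projection':
        # project the operator output onto the span of the collateral basis
        return Concatenation([VectorArrayOperator(deim_cb),
                              VectorArrayOperator(deim_cb, adjoint=True), fom.lf])
    # interpolate the operator output at the selected DOFs (triangular=False as above)
    return EmpiricalInterpolatedOperator(fom.lf, deim_dofs, deim_cb, False)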
......@@ -81,7 +82,7 @@ def calculate_l2_error_for_random_samples(basis,
# reconstruct high-dimensional solution, calculate l^2 error
step_n = 1
curr_step = 0
solver = create_boltzmann_solver(grid_size, mu)
solver = create_boltzmann_solver(dimension, grid_size, mu)
solver.reset() # resets some static variables in C++
while not solver.finished():
next_U = solver.next_n_timesteps(step_n, False)
......@@ -121,7 +122,8 @@ if __name__ == "__main__":
L2_tol = 1e-3 if argc < 4 else float(sys.argv[3])
L2_deim_tol = 1e-3 if argc < 5 else float(sys.argv[4])
omega = 0.95 if argc < 6 else float(sys.argv[5])
timings = False if argc < 7 else bool(sys.argv[6])
dimension = 2 if argc < 7 else int(sys.argv[6])
timings = False if argc < 8 else not (sys.argv[7] == "False" or sys.argv[7] == "0")
# We want to prescribe the mean L^2 error, but the HAPOD works with the l^2 error, so rescale tolerance
tol_scale_factor = (grid_size / 7)**(3 / 2)
l2_tol = L2_tol * tol_scale_factor
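# A minimal sketch of the rescaling above, assuming a uniform 3d grid over a spatial
# domain of edge length 7 (the usual checkerboard test case), i.e. mesh width
# h = 7 / grid_size: for piecewise constant coefficients the norms are related by
# ||u||_{L^2} = h**(3/2) * ||u||_{l^2}, so a prescribed (mean) L^2 tolerance has to be
# multiplied by h**(-3/2) = (grid_size / 7)**(3/2) before it is handed to the HAPOD,
# which works with plain Euclidean (l^2) norms.
def l2_tol_from_L2_tol(L2_tol, grid_size, domain_width=7.0):
    return L2_tol * (grid_size / domain_width)**1.5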
......@@ -131,7 +133,7 @@ if __name__ == "__main__":
logfile = open(filename, "a")
start = timer()
(basis, eval_basis, _, _, total_num_snaps, total_num_evals, _, mpi, _, _, _, _, solver) = \
boltzmann_binary_tree_hapod(grid_size, chunk_size, l2_tol, eval_tol=l2_deim_tol, omega=omega,
boltzmann_binary_tree_hapod(dimension, grid_size, chunk_size, l2_tol, eval_tol=l2_deim_tol, omega=omega,
calc_eval_basis=True, logfile=logfile, linear=False)
logfile.close()
logfile = open(filename, "a")
......@@ -150,6 +152,7 @@ if __name__ == "__main__":
basis,
mpi,
solver,
dimension,
grid_size,
chunk_size,
deim_dofs=deim_dofs,
......
......@@ -9,7 +9,7 @@ from hapod.hapod import local_pod, HapodParameters, incremental_hapod_over_ranks
from hapod.mpi import MPIWrapper
def boltzmann_incremental_hapod(grid_size, chunk_size, tol, omega=0.95, logfile=None, incremental_gramian=True):
def boltzmann_incremental_hapod(dimension, grid_size, chunk_size, tol, omega=0.95, logfile=None, incremental_gramian=True):
start = timer()
......@@ -18,7 +18,7 @@ def boltzmann_incremental_hapod(grid_size, chunk_size, tol, omega=0.95, logfile=
# get boltzmann solver to create snapshots
mu = create_and_scatter_boltzmann_parameters(mpi.comm_world)
solver = create_boltzmann_solver(grid_size, mu)
solver = create_boltzmann_solver(dimension, grid_size, mu)
num_chunks, num_timesteps = solver_statistics(solver, chunk_size)
# calculate rooted tree depth
......@@ -100,14 +100,15 @@ if __name__ == "__main__":
chunk_size = 6 if argc < 3 else int(sys.argv[2])
tol = 1e-3 if argc < 4 else float(sys.argv[3])
omega = 0.95 if argc < 5 else float(sys.argv[4])
inc_gramian = True if argc < 6 else not (sys.argv[5] == "False" or sys.argv[5] == "0")
dimension = 2 if argc < 6 else int(sys.argv[5])
inc_gramian = True if argc < 7 else not (sys.argv[6] == "False" or sys.argv[6] == "0")
filename = "boltzmann_incremental_hapod_gridsize_%d_chunksize_%d_tol_%f_omega_%f.log" \
% (grid_size, chunk_size, tol, omega)
logfile = open(filename, "a")
final_modes, _, total_num_snapshots, mu, mpi, _, _, _ = boltzmann_incremental_hapod(
grid_size, chunk_size, tol * grid_size, omega=omega, logfile=logfile, incremental_gramian=inc_gramian)
dimension, grid_size, chunk_size, tol * grid_size, omega=omega, logfile=logfile, incremental_gramian=inc_gramian)
final_modes, win = mpi.shared_memory_bcast_modes(final_modes)
calculate_error(final_modes, grid_size, mu, total_num_snapshots, mpi, grid_size, logfile=logfile)
calculate_error(final_modes, dimension, grid_size, mu, total_num_snapshots, mpi, logfile=logfile)
win.Free()
logfile.close()
if mpi.rank_world == 0:
......
......@@ -18,6 +18,7 @@ from hapod.boltzmann.utility import solver_statistics
def calculate_l2_error_for_random_samples(basis,
mpi,
solver,
dimension,
grid_size,
chunk_size,
seed=MPI.COMM_WORLD.Get_rank(),
......@@ -44,7 +45,7 @@ def calculate_l2_error_for_random_samples(basis,
for _ in range(params_per_rank):
mu = [random.uniform(0., 8.), random.uniform(0., 8.), 0., random.uniform(0., 8.)]
fom = DuneModel(nt, solver.dt, '', 2000000, grid_size, False, True, *mu)
fom = DuneModel(nt, solver.dt, dimension, '', 2000000, grid_size, False, True, *mu)
mu = fom.parse_parameter(mu)
......@@ -87,12 +88,13 @@ if __name__ == "__main__":
chunk_size = 6 if argc < 3 else int(sys.argv[2])
tol = 1e-3 if argc < 4 else float(sys.argv[3])
omega = 0.95 if argc < 5 else float(sys.argv[4])
dimension = 2 if argc < 6 else int(sys.argv[5])
orthonormalize = True
(basis, _, _, _, total_num_snaps, total_num_evals, _, mpi, _, _, _, _, solver) = \
boltzmann_binary_tree_hapod(grid_size, chunk_size, tol * grid_size, omega=omega, orthonormalize=orthonormalize)
boltzmann_binary_tree_hapod(dimension, grid_size, chunk_size, tol * grid_size, omega=omega, orthonormalize=orthonormalize)
basis, win = mpi.shared_memory_bcast_modes(basis, returnlistvectorarray=True)
red_errs, proj_errs, elapsed_red, elapsed_high_dim = calculate_l2_error_for_random_samples(
basis, mpi, solver, grid_size, chunk_size, basis_is_orthonormal=orthonormalize)
basis, mpi, solver, dimension, grid_size, chunk_size, basis_is_orthonormal=orthonormalize)
red_err = np.sqrt(np.sum(red_errs) / total_num_snaps) / grid_size if mpi.rank_world == 0 else None
proj_err = np.sqrt(np.sum(proj_errs) / total_num_snaps) / grid_size if mpi.rank_world == 0 else None
......
......@@ -9,14 +9,14 @@ from hapod.boltzmann.utility import calculate_error, create_and_scatter_boltzman
from hapod.mpi import MPIWrapper
def boltzmann_pod(grid_size, tol, logfile=None):
def boltzmann_pod(dimension, grid_size, tol, logfile=None):
# get MPI communicators
mpi = MPIWrapper()
# get boltzmann solver to create snapshots
mu = create_and_scatter_boltzmann_parameters(mpi.comm_world)
solver = create_boltzmann_solver(grid_size, mu)
solver = create_boltzmann_solver(dimension, grid_size, mu)
# calculate Boltzmann problem trajectory
start = timer()
......@@ -51,11 +51,12 @@ if __name__ == "__main__":
argc = len(sys.argv)
grid_size = 20 if argc < 2 else int(sys.argv[1])
tol = 1e-3 if argc < 3 else float(sys.argv[2])
dimension = 2 if argc < 4 else int(sys.argv[3])
filename = "POD_gridsize_%d_tol_%f.log" % (grid_size, tol)
logfile = open(filename, "a")
final_modes, _, total_num_snapshots, mu, mpi, _, _, _ = boltzmann_pod(grid_size, tol * grid_size, logfile=logfile)
final_modes, _, total_num_snapshots, mu, mpi, _, _, _ = boltzmann_pod(dimension, grid_size, tol * grid_size, logfile=logfile)
final_modes, win = mpi.shared_memory_bcast_modes(final_modes)
calculate_error(final_modes, grid_size, mu, total_num_snapshots, mpi, logfile=logfile)
calculate_error(final_modes, dimension, grid_size, mu, total_num_snapshots, mpi, logfile=logfile)
win.Free()
logfile.close()
if mpi.rank_world == 0:
......
......@@ -6,10 +6,10 @@ from hapod.boltzmann import wrapper
from hapod.boltzmann.utility import create_and_scatter_boltzmann_parameters
def visualize_solutions(grid_size=50):
def visualize_solutions(grid_size=50, dimension=2):
comm_world = MPI.COMM_WORLD
parameters = create_and_scatter_boltzmann_parameters(comm_world)
solver = wrapper.Solver(
solver = wrapper.Solver(dimension,
"boltzmann_snapshot_sigma_s1_%g_s2_%g_a1_%g_a2_%g" % (parameters[0], parameters[1], parameters[2],
parameters[3]), 1, grid_size, True, False, parameters[0],
parameters[1], parameters[2], parameters[3])
......@@ -20,4 +20,5 @@ def visualize_solutions(grid_size=50):
if __name__ == "__main__":
argc = len(sys.argv)
grid_size = 20 if argc < 2 else int(sys.argv[1])
visualize_solutions(grid_size)
dimension = 2 if argc < 3 else int(sys.argv[2])
visualize_solutions(grid_size, dimension)
Subproject commit aae23fce4d841ce753b40e338a0db9e25ac60576
Subproject commit 58511b72b0385abe5e8251276055955a1a935298
Subproject commit 5aafc9fc8319a05d3bed3f7daf7b16d2830cad2b
Subproject commit 58ad784133a53d4a91131007255f1536b0368dfd
Subproject commit 1cde2bf163fde8acfc95ed71596a6781fa297ec1
Subproject commit c0ec05fdd1ec9fac1f07b8761f4b43b86c3528b3
Subproject commit fb1e50722e937518a14ec6f34aaf87c86a6eaa5d
Subproject commit 40f60b18ca7f9e25347b2f3310f23c17f628e490
Subproject commit dbf7c687341a657e0722a661963c13418c27b1a6
Subproject commit b4b1a9fc097f807317a4cc7171256a1ee4881e20
Subproject commit 5739ca9c42c3fe2a2fea0989e7fbde85fb0516aa
Subproject commit c6f0d31498b387467def10845f6bb90329ec849b
......@@ -31,8 +31,8 @@ def create_and_scatter_boltzmann_parameters(comm, min_param=0., max_param=8.):
return comm.scatter(parameters_list, root=0)
def create_boltzmann_solver(gridsize, mu, linear=True):
return wrapper.Solver(
def create_boltzmann_solver(dimension, gridsize, mu, linear=True):
return wrapper.Solver(dimension,
"boltzmann_sigma_s_s_" + str(mu[0]) + "_a_" + str(mu[1]) + "sigma_t_s_" + str(mu[2]) + "_a_" + str(mu[3]),
2000000, gridsize, False, False, *mu, linear)
......@@ -48,9 +48,9 @@ def solver_statistics(solver, chunk_size, with_half_steps=True):
return num_chunks, num_time_steps
def calculate_trajectory_error(final_modes, grid_size, mu, with_half_steps=True):
def calculate_trajectory_error(final_modes, dimension, grid_size, mu, with_half_steps=True):
error = 0
solver = create_boltzmann_solver(grid_size, mu)
solver = create_boltzmann_solver(dimension, grid_size, mu)
while not solver.finished():
next_vectors = solver.next_n_timesteps(1, with_half_steps)
next_vectors_np = NumpyVectorSpace(next_vectors[0].dim).from_data(next_vectors.data)
......@@ -59,12 +59,13 @@ def calculate_trajectory_error(final_modes, grid_size, mu, with_half_steps=True)
def calculate_total_projection_error(final_modes,
dimension,
grid_size,
mu,
total_num_snapshots,
mpi_wrapper,
with_half_steps=True):
trajectory_error = calculate_trajectory_error(final_modes, grid_size, mu, with_half_steps)
trajectory_error = calculate_trajectory_error(final_modes, dimension, grid_size, mu, with_half_steps)
trajectory_errors = mpi_wrapper.comm_world.gather(trajectory_error, root=0)
error = 0
if mpi_wrapper.rank_world == 0:
......@@ -72,11 +73,11 @@ def calculate_total_projection_error(final_modes,
return error
def calculate_error(final_modes, grid_size, mu, total_num_snapshots, mpi_wrapper, with_half_steps=True, logfile=None):
def calculate_error(final_modes, dimension, grid_size, mu, total_num_snapshots, mpi_wrapper, with_half_steps=True, logfile=None):
''' Calculates the projection error. As we cannot store all snapshots due to memory restrictions, the
problem is solved again and the error is calculated on the fly.'''
start = timer()
err = calculate_total_projection_error(final_modes, grid_size, mu, total_num_snapshots, mpi_wrapper,
err = calculate_total_projection_error(final_modes, dimension, grid_size, mu, total_num_snapshots, mpi_wrapper,
with_half_steps)
err = err if grid_size == 0 else err / grid_size
elapsed = timer() - start
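# A short sketch of the on-the-fly error computation described in the docstring above
# (assumptions: final_modes is an orthonormal pymor VectorArray living in the same
# NumpyVectorSpace as the converted snapshots, and the solver/chunk handling mirrors
# calculate_trajectory_error in this diff):
import numpy as np
from pymor.vectorarrays.numpy import NumpyVectorSpace

def trajectory_projection_error_sketch(final_modes, solver, with_half_steps=True):
    squared_error = 0.
    while not solver.finished():
        chunk = solver.next_n_timesteps(1, with_half_steps)
        chunk_np = NumpyVectorSpace(chunk[0].dim).from_data(chunk.data)
        # accumulate ||u - V V^T u||^2 for every snapshot u in the chunk
        residual = chunk_np - final_modes.lincomb(chunk_np.dot(final_modes))
        squared_error += np.sum(residual.l2_norm()**2)
    return np.sqrt(squared_error)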
......
......@@ -20,7 +20,7 @@ from pymor.vectorarrays.numpy import NumpyVectorArray, NumpyVectorSpace
from gdt.vectors import CommonDenseVector
import gdt.boltzmann
from hapod.xt import DuneXtLaListVectorSpace
from hapod.xt import DuneXtLaVector, DuneXtLaListVectorSpace
#import cvxopt
#from cvxopt import matrix as cvxmatrix
......@@ -32,9 +32,9 @@ PARAMETER_TYPE = ParameterType({'s': (4,)})
class Solver(Parametric):
def __init__(self, *args):
self.impl = gdt.boltzmann.BoltzmannSolver2d(*args)
#self.impl = gdt.boltzmann.BoltzmannSolver3d(*args)
def __init__(self, dimension=2, *args):
assert dimension == 2 or dimension == 3
self.impl = gdt.boltzmann.BoltzmannSolver2d(*args) if dimension == 2 else gdt.boltzmann.BoltzmannSolver3d(*args)
self.last_mu = None
self.solution_space = DuneXtLaListVectorSpace(self.impl.get_initial_values().dim)
self.build_parameter_type(PARAMETER_TYPE)
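# A small usage sketch of the new dimension switch (argument values are illustrative;
# the positional arguments after dimension simply mirror the create_boltzmann_solver
# call elsewhere in this diff, and the parameter ordering sigma_s_1, sigma_s_2,
# sigma_a_1, sigma_a_2 is assumed from the snapshot file names used there):
from hapod.boltzmann import wrapper

mu = [1.0, 0.0, 0.0, 10.0]
solver_2d = wrapper.Solver(2, "snapshots_2d", 2000000, 20, False, False, *mu)
solver_3d = wrapper.Solver(3, "snapshots_3d", 2000000, 20, False, False, *mu)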
......@@ -197,7 +197,7 @@ class DuneModel(BoltzmannModel):
super().__init__(initial_data=initial_data,
lf=lf_operator,
rhs=rhs_operator,
t_end=solver.t_end(),
t_end=solver.t_end,
nt=nt,
dt=dt,
parameter_space=param_space,
......
Subproject commit e3371b34c637820596d21af527758e0397093ba5
Subproject commit 3682cee5fa922216f0e6af24f03a97c75115071b
import random
import sys
import time
from timeit import default_timer as timer
from mpi4py import MPI
import numpy as np
from pymor.algorithms.ei import deim
from pymor.reductors.basic import GenericRBReductor
from pymor.operators.constructions import Concatenation, VectorArrayOperator
from pymor.operators.ei import EmpiricalInterpolatedOperator
from boltzmann.wrapper import DuneModel
from boltzmann_binary_tree_hapod import boltzmann_binary_tree_hapod
from boltzmannutility import solver_statistics
#from cvxopt import matrix as cvxmatrix
def calculate_l2_error_for_random_samples(basis,
mpi,
solver,
grid_size,
chunk_size,
seed=MPI.COMM_WORLD.Get_rank(),
params_per_rank=2,
with_half_steps=True,
basis_is_orthonormal=True,
eval_basis=None,
deim_dofs=None,
deim_cb=None,
hyper_reduction='deim'):
'''Calculates model reduction and projection errors for random parameters'''
random.seed(seed)
_, num_time_steps = solver_statistics(solver, chunk_size, with_half_steps)
nt = int(num_time_steps - 1) if not with_half_steps else int((num_time_steps - 1) / 2)
elapsed_high_dim = elapsed_red = red_errs = proj_errs = 0.
# realizable projection in reduced space (for hat functions)
#high_dim = basis.to_numpy().shape[1]
#red_dim = len(basis)
#print(high_dim, red_dim)
#cvxopt_P = cvxmatrix(np.eye(red_dim, dtype=float))
#cvxopt_G = cvxmatrix(-basis.to_numpy(ensure_copy=True).transpose())
#cvxopt_h = cvxmatrix(-1e-8, (high_dim, 1))
for _ in range(params_per_rank):
mu = [random.uniform(0., 8.), random.uniform(0., 8.), 0., random.uniform(0., 8.)]
d = DuneModel(nt, solver.time_step_length(), '', 2000000, grid_size, False, True, *mu)
mu = d.parse_parameter(mu)
# calculate high-dimensional solution
start = timer()
U = d.solve(mu, return_half_steps=False)
elapsed_high_dim += timer() - start
# create reduced problem
d = d.as_generic_type()
assert hyper_reduction in ('none', 'projection', 'deim')
if hyper_reduction == 'projection':
lf = Concatenation([VectorArrayOperator(eval_basis), VectorArrayOperator(eval_basis, adjoint=True), d.lf])
d = d.with_(lf=lf)
elif hyper_reduction == 'deim':
lf = EmpiricalInterpolatedOperator(d.lf, deim_dofs, deim_cb, False)
d = d.with_(lf=lf)
start = timer()
reductor = GenericRBReductor(d.as_generic_type(), basis, basis_is_orthonormal=basis_is_orthonormal)
rd = reductor.reduce()
print("Reduction took ", timer() - start)
# solve reduced problem
start = timer()
# U_rb = rd.solve(mu, cvxopt_P=cvxopt_P, cvxopt_G=cvxopt_G, cvxopt_h=cvxopt_h, basis=basis)
# time the reduced solves for randomly drawn parameters; use a separate name so that mu,
# which is still needed for the error computation against U below, is not overwritten
for _ in range(10000):
timing_mu = [random.uniform(0., 8.), random.uniform(0., 8.), 0., random.uniform(0., 8.)]
rd.solve(timing_mu)
elapsed_red += timer() - start
# solve once more for the original parameter so that U_rb matches the high-dimensional U
U_rb = rd.solve(mu)
# reconstruct high-dimensional solution, calculate l^2 error
U_rb = reductor.reconstruct(U_rb)
red_errs += np.sum((U - U_rb).l2_norm()**2)
proj_errs += np.sum((U - basis.lincomb(U.dot(basis))).l2_norm()**2)
elapsed_high_dim /= params_per_rank
elapsed_red /= params_per_rank
red_errs /= params_per_rank
proj_errs /= params_per_rank
red_errs = mpi.comm_world.gather(red_errs, root=0)
proj_errs = mpi.comm_world.gather(proj_errs, root=0)
elapsed_red = mpi.comm_world.gather(elapsed_red, root=0)
elapsed_high_dim = mpi.comm_world.gather(elapsed_high_dim, root=0)
return red_errs, proj_errs, elapsed_red, elapsed_high_dim
if __name__ == "__main__":
'''Computes a HAPOD to get a reduced basis, then calculates projection and model reduction errors for random samples'''
grid_size = int(sys.argv[1])
chunk_size = int(sys.argv[2])
L2_tol = float(sys.argv[3])
L2_deim_tol = float(sys.argv[4])
omega = float(sys.argv[5])
orthonormalize = True
# We want to prescribe the mean L^2 error, but the HAPOD works with the l^2 error, so rescale tolerance
tol_scale_factor = (grid_size / 7)**(3 / 2)
l2_tol = L2_tol * tol_scale_factor
l2_deim_tol = L2_deim_tol * tol_scale_factor
start = timer()
(basis, eval_basis, _, _, total_num_snaps, total_num_evals, _, mpi, _, _, _, _, solver) = \
boltzmann_binary_tree_hapod(grid_size, chunk_size, l2_tol, eval_tol=l2_deim_tol, omega=omega,
orthonormalize=orthonormalize, calc_eval_basis=True)
elapsed_basis_gen = timer() - start
basis = mpi.shared_memory_bcast_modes(basis, returnlistvectorarray=True)
eval_basis = mpi.shared_memory_bcast_modes(eval_basis, returnlistvectorarray=True)
if mpi.rank_world == 0:
deim_dofs, deim_cb, _ = deim(eval_basis, len(eval_basis))
assert len(deim_cb) == len(eval_basis)
else:
deim_dofs = deim_cb = None
deim_dofs = mpi.comm_world.bcast(deim_dofs, root=0)
deim_cb = mpi.shared_memory_bcast_modes(deim_cb, returnlistvectorarray=True)
# the function returns arrays of l^2 errors, each entry representing results for one snapshot
red_errs, proj_errs, elapsed_red, elapsed_high_dim = calculate_l2_error_for_random_samples(
basis,
mpi,
solver,
grid_size,
chunk_size,
basis_is_orthonormal=orthonormalize,
eval_basis=eval_basis,
deim_dofs=deim_dofs,
deim_cb=deim_cb,
hyper_reduction='deim')
# calculate mean l^2 errors, convert back to L^2 errors, calculate mean execution times
red_err = np.sqrt(np.sum(red_errs) / total_num_snaps) / tol_scale_factor if mpi.rank_world == 0 else None
proj_err = np.sqrt(np.sum(proj_errs) / total_num_snaps) / tol_scale_factor if mpi.rank_world == 0 else None
elapsed_red_mean = np.sum(elapsed_red) / len(elapsed_red) if mpi.rank_world == 0 else None
elapsed_high_dim_mean = np.sum(elapsed_high_dim) / len(elapsed_high_dim) if mpi.rank_world == 0 else None
if mpi.rank_world == 0:
print("\n\n\nResults:\n")
print('Creating the bases took %g seconds.' % elapsed_basis_gen)
print('Solving the high-dimensional problem took %g seconds on average.' % elapsed_high_dim_mean)
print('Solving the reduced problem took %g seconds on average.' % elapsed_red_mean)
print('The mean L2 reduction error and mean L2 projection error were %g and %g, respectively.' % (red_err,
proj_err))
print('Basis size and collateral basis size were %g and %g, respectively.' % (len(basis), len(eval_basis)))