diff --git a/dune/gdt/operators/advection-fv-entropybased.hh b/dune/gdt/operators/advection-fv-entropybased.hh
index a58845a244775624d357f25c68c4cb8ab8d9a49f..ba857bdf2e2d357894745b889e9b86af5a889b82 100644
--- a/dune/gdt/operators/advection-fv-entropybased.hh
+++ b/dune/gdt/operators/advection-fv-entropybased.hh
@@ -92,6 +92,7 @@ public:
     , advection_op_(advection_op)
     , rhs_op_(rhs_op)
     , inverse_hessian_operator_(inverse_hessian_operator)
+    , reg_indicators_(advection_op_.source_space().grid_view().size(0), false)
   {}
 
   bool linear() const override final
@@ -119,13 +120,20 @@ public:
     u_update *= -1.;
     rhs_op_.apply(source, rhs_update, param);
     u_update += rhs_update;
-    inverse_hessian_operator_.apply_inverse_hessian(source, u_update, range, param);
+    std::fill(reg_indicators_.begin(), reg_indicators_.end(), false);
+    inverse_hessian_operator_.apply_inverse_hessian(source, u_update, reg_indicators_, range, param);
+  }
+
+  const std::vector<bool>& reg_indicators() const
+  {
+    return reg_indicators_;
   }
 
   const DensityOperatorType& density_op_;
   const AdvectionOperatorType& advection_op_;
   const RhsOperatorType& rhs_op_;
   const InverseHessianOperatorType& inverse_hessian_operator_;
+  mutable std::vector<bool> reg_indicators_;
 }; // class EntropicCoordinatesOperator<...>
 
 
diff --git a/dune/gdt/operators/advection-with-reconstruction.hh b/dune/gdt/operators/advection-with-reconstruction.hh
index 0fb6eb227585d62b33342be28323996de27914a1..ba5bc9aff9b4bb0e9449bff70e23e8d67b79437d 100644
--- a/dune/gdt/operators/advection-with-reconstruction.hh
+++ b/dune/gdt/operators/advection-with-reconstruction.hh
@@ -100,6 +100,8 @@ public:
                                                const ReconstructionOperatorType& reconstruction_operator)
     : advection_operator_(advection_operator)
     , reconstruction_operator_(reconstruction_operator)
+    , reconstructed_values_(source_space().grid_view().indexSet().size(0))
+    , reconstructed_function_(source_space().grid_view(), reconstructed_values_)
   {}
 
   bool linear() const override final
@@ -120,19 +122,17 @@ public:
   void apply(const VectorType& source, VectorType& range, const XT::Common::Parameter& param) const override final
   {
     // do reconstruction
-    typename ReconstructionOperatorType::ReconstructedValuesType reconstructed_values(
-        source_space().grid_view().indexSet().size(0));
-    typename ReconstructionOperatorType::ReconstructedFunctionType reconstructed_function(source_space().grid_view(),
-                                                                                          reconstructed_values);
-    reconstruction_operator_.apply(source, reconstructed_function, param);
+    reconstruction_operator_.apply(source, reconstructed_function_, param);
 
     // apply advection operator
     std::fill(range.begin(), range.end(), 0.);
-    advection_operator_.apply(reconstructed_function, range, param);
+    advection_operator_.apply(reconstructed_function_, range, param);
   }
 
   const AdvectionOperatorType& advection_operator_;
   const ReconstructionOperatorType& reconstruction_operator_;
+  mutable typename ReconstructionOperatorType::ReconstructedValuesType reconstructed_values_;
+  mutable typename ReconstructionOperatorType::ReconstructedFunctionType reconstructed_function_;
 }; // class AdvectionWithReconstructionOperator<...>
 
 
diff --git a/dune/gdt/test/momentmodels/density_evaluations.hh b/dune/gdt/test/momentmodels/density_evaluations.hh
index ff863a98f33d8520c67ada453c52eb1ca61aa87b..6258f9996defd9ae047ceeb49ebb9dd728da0cec 100644
--- a/dune/gdt/test/momentmodels/density_evaluations.hh
+++ b/dune/gdt/test/momentmodels/density_evaluations.hh
@@ -45,13 +45,16 @@ public:
 
   explicit LocalDensityEvaluator(const SpaceType& space,
                                  const VectorType& alpha_dofs,
+                                 VectorType& range_dofs,
                                  EntropyFluxType& analytical_flux,
                                  const BoundaryDistributionType& boundary_distribution,
                                  const RangeFieldType min_acceptable_density,
                                  const XT::Common::Parameter& param)
     : space_(space)
     , alpha_(space_, alpha_dofs, "alpha")
+    , range_(space_, range_dofs, "regularized alpha")
     , local_alpha_(alpha_.local_discrete_function())
+    , local_range_(range_.local_discrete_function())
     , analytical_flux_(analytical_flux)
     , boundary_distribution_(boundary_distribution)
     , min_acceptable_density_(min_acceptable_density)
@@ -63,7 +66,9 @@ public:
     : BaseType(other)
     , space_(other.space_)
     , alpha_(space_, other.alpha_.dofs().vector(), "source")
+    , range_(space_, other.range_.dofs().vector(), "range")
     , local_alpha_(alpha_.local_discrete_function())
+    , local_range_(range_.local_discrete_function())
     , analytical_flux_(other.analytical_flux_)
     , boundary_distribution_(other.boundary_distribution_)
     , min_acceptable_density_(other.min_acceptable_density_)
@@ -79,11 +84,14 @@ public:
   void apply_local(const EntityType& entity) override final
   {
     local_alpha_->bind(entity);
+    local_range_->bind(entity);
     const auto entity_index = index_set_.index(entity);
     XT::Common::FieldVector<RangeFieldType, dimRange> alpha;
     for (size_t ii = 0; ii < dimRange; ++ii)
       alpha[ii] = local_alpha_->dofs().get_entry(ii);
     analytical_flux_.store_evaluations(entity_index, alpha);
+    for (size_t ii = 0; ii < dimRange; ++ii)
+      local_range_->dofs().set_entry(ii, alpha[ii]);
     for (auto&& intersection : Dune::intersections(space_.grid_view(), entity))
       if (intersection.boundary())
         analytical_flux_.store_boundary_evaluations(
@@ -94,7 +102,9 @@ public:
 private:
   const SpaceType& space_;
   const ConstDiscreteFunctionType alpha_;
+  DiscreteFunctionType range_;
   std::unique_ptr<typename ConstDiscreteFunctionType::ConstLocalDiscreteFunctionType> local_alpha_;
+  std::unique_ptr<typename DiscreteFunctionType::LocalDiscreteFunctionType> local_range_;
   EntropyFluxType& analytical_flux_;
   const BoundaryDistributionType& boundary_distribution_;
   const RangeFieldType min_acceptable_density_;
@@ -147,17 +157,17 @@ public:
     return space_;
   }
 
-  void
-  apply(const VectorType& alpha, VectorType& /*range*/, const XT::Common::Parameter& param = {}) const override final
+  void apply(const VectorType& alpha, VectorType& range, const XT::Common::Parameter& param = {}) const override final
   {
-    analytical_flux_.exp_evaluations().resize(space_.grid_view().size(0));
+    const auto num_entities = space_.grid_view().size(0);
+    analytical_flux_.exp_evaluations().resize(num_entities);
     if (EntropyFluxType::entropy != EntropyType::MaxwellBoltzmann) {
-      analytical_flux_.eta_ast_prime_evaluations().resize(space_.grid_view().size(0));
-      analytical_flux_.eta_ast_twoprime_evaluations().resize(space_.grid_view().size(0));
+      analytical_flux_.eta_ast_prime_evaluations().resize(num_entities);
+      analytical_flux_.eta_ast_twoprime_evaluations().resize(num_entities);
     }
-    analytical_flux_.boundary_distribution_evaluations().resize(space_.grid_view().size(0));
+    analytical_flux_.boundary_distribution_evaluations().resize(num_entities);
     LocalDensityEvaluatorType local_density_evaluator(
-        space_, alpha, analytical_flux_, boundary_distribution_, min_acceptable_density_, param);
+        space_, alpha, range, analytical_flux_, boundary_distribution_, min_acceptable_density_, param);
     auto walker = XT::Grid::Walker<typename SpaceType::GridViewType>(space_.grid_view());
     walker.append(local_density_evaluator);
     walker.walk(true);
diff --git a/dune/gdt/test/momentmodels/entropic-coords-mn-discretization.hh b/dune/gdt/test/momentmodels/entropic-coords-mn-discretization.hh
index c9118945ccb0e796b43480f3e25ef5562e2da8d6..5f8f1161061e85e27c4a61d8cf1746d037b991b8 100644
--- a/dune/gdt/test/momentmodels/entropic-coords-mn-discretization.hh
+++ b/dune/gdt/test/momentmodels/entropic-coords-mn-discretization.hh
@@ -33,7 +33,7 @@
 #include <dune/gdt/test/momentmodels/entropyflux.hh>
 #include <dune/gdt/test/momentmodels/hessianinverter.hh>
 #include <dune/gdt/test/momentmodels/density_evaluations.hh>
-#include <dune/gdt/tools/timestepper/adaptive-rungekutta.hh>
+#include <dune/gdt/tools/timestepper/adaptive-rungekutta-kinetic.hh>
 #include <dune/gdt/tools/timestepper/explicit-rungekutta.hh>
 #include <dune/gdt/tools/timestepper/fractional-step.hh>
 #include <dune/gdt/tools/timestepper/matrix-exponential-kinetic-isotropic.hh>
@@ -72,8 +72,9 @@ struct HyperbolicEntropicCoordsMnDiscretization
     using RangeFieldType = typename MomentBasis::RangeFieldType;
     static constexpr size_t dimDomain = MomentBasis::dimDomain;
     static constexpr size_t dimRange = MomentBasis::dimRange;
-    using MatrixType = typename XT::LA::Container<RangeFieldType>::MatrixType;
-    using VectorType = typename XT::LA::Container<RangeFieldType>::VectorType;
+    static const auto la_backend = TestCaseType::la_backend;
+    using MatrixType = typename XT::LA::Container<RangeFieldType, la_backend>::MatrixType;
+    using VectorType = typename XT::LA::Container<RangeFieldType, la_backend>::VectorType;
     using GenericFunctionType = XT::Functions::GenericFunction<dimDomain, dimRange, 1, RangeFieldType>;
     using DomainType = FieldVector<RangeFieldType, dimDomain>;
     using RangeType = FieldVector<RangeFieldType, dimRange>;
@@ -149,7 +150,7 @@ struct HyperbolicEntropicCoordsMnDiscretization
     // ******************** choose flux and rhs operator and timestepper ******************************************
 
     using AdvectionOperatorType = AdvectionFvOperator<MatrixType, GV, dimRange>;
-    using HessianInverterType = EntropicHessianInverter<MomentBasis, SpaceType, slope>;
+    using HessianInverterType = EntropicHessianInverter<MomentBasis, SpaceType, slope, MatrixType>;
 #if 0
     using ReconstructionOperatorType = PointwiseLinearReconstructionNoCharOperator<
                                                                              GV,
@@ -169,7 +170,7 @@ struct HyperbolicEntropicCoordsMnDiscretization
     //        HessianInverterType>;
     using NonEntropicRhsOperatorType = LocalizableOperator<MatrixType, GV, dimRange>;
     //    using RhsOperatorType = EntropicCoordinatesOperator<NonEntropicRhsOperatorType, HessianInverterType>;
-    using DensityOperatorType = DensityEvaluator<MomentBasis, SpaceType, slope>;
+    using DensityOperatorType = DensityEvaluator<MomentBasis, SpaceType, slope, MatrixType>;
     using CombinedOperatorType = EntropicCoordinatesCombinedOperator<DensityOperatorType,
                                                                      NonEntropicAdvectionOperatorType,
                                                                      NonEntropicRhsOperatorType,
@@ -185,16 +186,18 @@ struct HyperbolicEntropicCoordsMnDiscretization
     //                                      TimeStepperMethods::explicit_euler>;
 
     //    using OperatorTimeStepperType =
-    //        AdaptiveRungeKuttaTimeStepper<FvOperatorType,
+    //        KineticAdaptiveRungeKuttaTimeStepper<FvOperatorType,
     //                                      DiscreteFunctionType>;
     //    using RhsTimeStepperType =
-    //        AdaptiveRungeKuttaTimeStepper<RhsOperatorType,
+    //        KineticAdaptiveRungeKuttaTimeStepper<RhsOperatorType,
     //                                      DiscreteFunctionType>;
 
     //    using TimeStepperType = FractionalTimeStepper<OperatorTimeStepperType, RhsTimeStepperType>;
 
-    using TimeStepperType =
-        AdaptiveRungeKuttaTimeStepper<CombinedOperatorType, DiscreteFunctionType, TimeStepperMethods::dormand_prince>;
+    using TimeStepperType = KineticAdaptiveRungeKuttaTimeStepper<CombinedOperatorType,
+                                                                 DiscreteFunctionType,
+                                                                 EntropyFluxType,
+                                                                 TimeStepperMethods::dormand_prince>;
 
     // *************** Calculate dx and initial dt **************************************
     Dune::XT::Grid::Dimensions<GV> dimensions(grid_view);
@@ -285,8 +288,9 @@ struct HyperbolicEntropicCoordsMnDiscretization
       auto ret = u_elem * (-sigma_t_value);
       ret += u_iso * basis_functions->density(u_elem) * sigma_s_value;
       ret += basis_integrated * Q_value;
-      for (size_t ii = 0; ii < local_range.dofs().size(); ++ii)
-        local_range.dofs()[ii] += ret[ii];
+      auto& range_dofs = local_range.dofs();
+      for (size_t ii = 0; ii < dimRange; ++ii)
+        range_dofs[ii] += ret[ii];
     };
     NonEntropicRhsOperatorType non_entropic_rhs_operator(grid_view, fv_space, fv_space);
     non_entropic_rhs_operator.append(GenericLocalElementOperator<VectorType, GV, dimRange>(rhs_func));
@@ -298,13 +302,21 @@ struct HyperbolicEntropicCoordsMnDiscretization
     //    OperatorTimeStepperType timestepper_op(fv_operator, alpha, -1.0);
     //    RhsTimeStepperType timestepper_rhs(rhs_operator, alpha, 1.0);
     //    TimeStepperType timestepper(timestepper_op, timestepper_rhs);
-    TimeStepperType timestepper(combined_operator, alpha, 1.);
+    TimeStepperType timestepper(combined_operator, *analytical_flux, alpha, 1.);
 
     auto begin_time = std::chrono::steady_clock::now();
     auto visualizer = std::make_unique<XT::Functions::GenericVisualizer<dimRange, 1, double>>(
         1, [&basis_functions, &analytical_flux](const int /*comp*/, const auto& val) {
           return basis_functions->density(analytical_flux->get_u(val));
         });
+    // auto visualizer = std::make_unique<XT::Functions::GenericVisualizer<dimRange, 1, double>>(
+    //     1, [](const int /*comp*/, const auto& val) {
+    //       double ret = 0.;
+    //       for (const auto& entry : val)
+    //         ret = std::max(std::abs(entry), ret);
+    //       return ret;
+    //     });
+
     timestepper.solve(t_end,
                       dt,
                       num_save_steps,
@@ -352,14 +364,14 @@ struct HyperbolicEntropicCoordsMnTest
   void run()
   {
     auto norms = HyperbolicEntropicCoordsMnDiscretization<TestCaseType>::run(
-                     1,
+                     100,
                      0,
                      TestCaseType::quad_order,
                      TestCaseType::quad_refinements,
                      "",
                      2,
                      TestCaseType::t_end,
-                     "test_kinetic",
+                     "test_kinetic_alpha",
                      Dune::GDT::is_full_moment_basis<typename TestCaseType::MomentBasis>::value)
                      .first;
     const double l1norm = norms[0];
diff --git a/dune/gdt/test/momentmodels/entropyflux_kineticcoords.hh b/dune/gdt/test/momentmodels/entropyflux_kineticcoords.hh
index a948f7f7441a008b23034e43b8b487ce895eb651..1da29acc543df216472bfd5379a7a63661cae12a 100644
--- a/dune/gdt/test/momentmodels/entropyflux_kineticcoords.hh
+++ b/dune/gdt/test/momentmodels/entropyflux_kineticcoords.hh
@@ -188,7 +188,7 @@ public:
     implementation_->apply_inverse_hessian((*eta_ast_twoprime_evaluations_)[entity_index], u, Hinv_u);
   }
 
-  void store_evaluations(const size_t entity_index, const StateType& alpha)
+  void store_evaluations(const size_t entity_index, StateType& alpha, bool check = true)
   {
     implementation_->store_exp_evaluations(exp_evaluations_[entity_index], alpha);
     if (entropy != EntropyType::MaxwellBoltzmann) {
@@ -196,6 +196,19 @@ public:
       implementation_->store_eta_ast_twoprime_vals(exp_evaluations_[entity_index],
                                                    eta_ast_twoprime_storage_[entity_index]);
     }
+    set_eta_ast_pointers();
+    // check for inf and nan and very low densities
+    if (check) {
+      const auto& u = get_u(entity_index);
+      for (auto&& entry : u)
+        if (std::isnan(entry) || std::isinf(entry))
+          DUNE_THROW(Dune::MathError, "inf or nan in u!");
+      const auto rho = basis_functions().density(u);
+      if (rho < 1e-9) {
+        alpha = basis_functions().alpha_iso(1e-8);
+        store_evaluations(entity_index, alpha, false);
+      }
+    }
   }
 
   void set_eta_ast_pointers()
@@ -258,7 +271,6 @@ public:
     return boundary_distribution_evaluations_;
   }
 
-
 private:
   std::shared_ptr<ImplementationType> implementation_;
   std::vector<QuadratureWeightsType> exp_evaluations_;
diff --git a/dune/gdt/test/momentmodels/hessianinverter.hh b/dune/gdt/test/momentmodels/hessianinverter.hh
index bbe655e48213bd20ffbb9d8552212852fc5c5ec3..6787faa1bbc5c93936887ef1c6906797dc13cf33 100644
--- a/dune/gdt/test/momentmodels/hessianinverter.hh
+++ b/dune/gdt/test/momentmodels/hessianinverter.hh
@@ -42,12 +42,14 @@ public:
   explicit LocalEntropicHessianInverter(const SpaceType& space,
                                         const VectorType& alpha_dofs,
                                         const VectorType& u_update_dofs,
+                                        std::vector<bool>& reg_indicators,
                                         VectorType& alpha_range_dofs,
                                         const EntropyFluxType& analytical_flux,
                                         const XT::Common::Parameter& param)
     : space_(space)
     , alpha_(space_, alpha_dofs, "alpha")
     , u_update_(space_, u_update_dofs, "u_update")
+    , reg_indicators_(reg_indicators)
     , local_alpha_(alpha_.local_discrete_function())
     , local_u_update_(u_update_.local_discrete_function())
     , range_(space_, alpha_range_dofs, "range")
@@ -61,6 +63,7 @@ public:
     , space_(other.space_)
     , alpha_(space_, other.alpha_.dofs().vector(), "source")
     , u_update_(space_, other.u_update_.dofs().vector(), "source")
+    , reg_indicators_(other.reg_indicators_)
     , local_alpha_(alpha_.local_discrete_function())
     , local_u_update_(u_update_.local_discrete_function())
     , range_(space_, other.range_.dofs().vector(), "range")
@@ -81,7 +84,16 @@ public:
     XT::Common::FieldVector<RangeFieldType, dimRange> u, Hinv_u;
     for (size_t ii = 0; ii < dimRange; ++ii)
       u[ii] = local_u_update_->dofs().get_entry(ii);
-    analytical_flux_.apply_inverse_hessian(space_.grid_view().indexSet().index(entity), u, Hinv_u);
+    const auto entity_index = space_.grid_view().indexSet().index(entity);
+    try {
+      analytical_flux_.apply_inverse_hessian(entity_index, u, Hinv_u);
+    } catch (const Dune::MathError& e) {
+      if (param_.has_key("reg") && param_.get("reg")[0]) {
+        reg_indicators_[entity_index] = true;
+        return;
+      } else
+        throw;
+    }
     for (auto&& entry : Hinv_u)
       if (std::isnan(entry) || std::isinf(entry)) {
         //        std::cout << "x: " << entity.geometry().center() << "u: " << u << ", alpha: " << alpha << ", Hinv_u: "
@@ -97,6 +109,7 @@ private:
   const SpaceType& space_;
   const ConstDiscreteFunctionType alpha_;
   const ConstDiscreteFunctionType u_update_;
+  std::vector<bool>& reg_indicators_;
   std::unique_ptr<typename ConstDiscreteFunctionType::ConstLocalDiscreteFunctionType> local_alpha_;
   std::unique_ptr<typename ConstDiscreteFunctionType::ConstLocalDiscreteFunctionType> local_u_update_;
   DiscreteFunctionType range_;
@@ -152,11 +165,12 @@ public:
 
   void apply_inverse_hessian(const VectorType& alpha,
                              const VectorType& u_update,
+                             std::vector<bool>& reg_indicators,
                              VectorType& alpha_update,
                              const XT::Common::Parameter& param) const
   {
     LocalEntropicHessianInverter<SpaceType, VectorType, MomentBasis, slope> local_hessian_inverter(
-        space_, alpha, u_update, alpha_update, analytical_flux_, param);
+        space_, alpha, u_update, reg_indicators, alpha_update, analytical_flux_, param);
     auto walker = XT::Grid::Walker<typename SpaceType::GridViewType>(space_.grid_view());
     walker.append(local_hessian_inverter);
     walker.walk(true);
diff --git a/dune/gdt/test/momentmodels/kinetictransport/sourcebeam.hh b/dune/gdt/test/momentmodels/kinetictransport/sourcebeam.hh
index 184277d80b73614dd4d51d233de4eaa14abc2ee4..92e6da5972060d74592b08eba6187e622bdd3e23 100644
--- a/dune/gdt/test/momentmodels/kinetictransport/sourcebeam.hh
+++ b/dune/gdt/test/momentmodels/kinetictransport/sourcebeam.hh
@@ -101,7 +101,7 @@ public:
         return [this](const DomainType& /*v*/) { return this->psi_vac_; };
       else
         return [this](const DomainType& v) {
-          const RangeFieldType val = std::exp(-1e5 * std::pow(v[0] - 1., 2));
+          const RangeFieldType val = std::exp(-1e5 * std::pow(v[0] - 1., 2)) / helper_base::denominator();
           return (val > this->psi_vac_) ? val : this->psi_vac_;
         };
     };
diff --git a/dune/gdt/test/momentmodels/kinetictransport/testcases.hh b/dune/gdt/test/momentmodels/kinetictransport/testcases.hh
index f16229a02abd021ac0eda054f83b60b15e16b604..a85af35f6d892054f12a075895bee8fe86cfe22b 100644
--- a/dune/gdt/test/momentmodels/kinetictransport/testcases.hh
+++ b/dune/gdt/test/momentmodels/kinetictransport/testcases.hh
@@ -262,7 +262,8 @@ struct SourceBeamPnTestCase
   using SpaceType = FiniteVolumeSpace<GridViewType, dimRange, 1, RangeFieldType>;
   using AdvectionSourceSpaceType =
       std::conditional_t<reconstruct, DiscontinuousLagrangeSpace<GridViewType, dimRange, RangeFieldType>, SpaceType>;
-  using VectorType = typename Dune::XT::LA::Container<RangeFieldType, Dune::XT::LA::default_backend>::VectorType;
+  static constexpr auto la_backend = Dune::XT::LA::Backends::eigen_sparse;
+  using VectorType = typename Dune::XT::LA::Container<RangeFieldType, la_backend>::VectorType;
   using DiscreteFunctionType = DiscreteFunction<VectorType, GridViewType, dimRange, 1, RangeFieldType>;
   using ProblemType = SourceBeamPn<E, MomentBasis>;
   static constexpr RangeFieldType t_end = 0.25;
diff --git a/dune/gdt/test/momentmodels/mn-discretization.hh b/dune/gdt/test/momentmodels/mn-discretization.hh
index 0baea53936caa45aabad3375af899925e1a78d4c..038cad0a2943eedca23e0b8ede0379300bfa6d25 100644
--- a/dune/gdt/test/momentmodels/mn-discretization.hh
+++ b/dune/gdt/test/momentmodels/mn-discretization.hh
@@ -65,8 +65,9 @@ struct HyperbolicMnDiscretization
     using BoundaryValueType = typename ProblemType::BoundaryValueType;
     static constexpr size_t dimDomain = MomentBasis::dimDomain;
     static constexpr size_t dimRange = MomentBasis::dimRange;
-    using MatrixType = typename XT::LA::Container<RangeFieldType>::MatrixType;
-    using VectorType = typename XT::LA::Container<RangeFieldType>::VectorType;
+    static const auto la_backend = TestCaseType::la_backend;
+    using MatrixType = typename XT::LA::Container<RangeFieldType, la_backend>::MatrixType;
+    using VectorType = typename XT::LA::Container<RangeFieldType, la_backend>::VectorType;
 
     //******************* create grid and FV space ***************************************
     auto grid_config = ProblemType::default_grid_cfg();
@@ -109,7 +110,7 @@ struct HyperbolicMnDiscretization
 
     using AdvectionOperatorType = AdvectionFvOperator<MatrixType, GV, dimRange>;
     using EigenvectorWrapperType = typename EigenvectorWrapperChooser<MomentBasis, AnalyticalFluxType>::type;
-    using EntropySolverType = EntropySolver<MomentBasis, SpaceType>;
+    using EntropySolverType = EntropySolver<MomentBasis, SpaceType, MatrixType>;
     //    using ReconstructionOperatorType =
     //        LinearReconstructionOperator<AnalyticalFluxType, BoundaryValueType, GV, MatrixType,
     //        EigenvectorWrapperType>;
diff --git a/dune/gdt/tools/timestepper/adaptive-rungekutta-kinetic.hh b/dune/gdt/tools/timestepper/adaptive-rungekutta-kinetic.hh
new file mode 100644
index 0000000000000000000000000000000000000000..0d2a1193b5ccc61f458158ab624c9e87fc1b0a98
--- /dev/null
+++ b/dune/gdt/tools/timestepper/adaptive-rungekutta-kinetic.hh
@@ -0,0 +1,381 @@
+// This file is part of the dune-gdt project:
+//   https://github.com/dune-community/dune-gdt
+// Copyright 2010-2018 dune-gdt developers and contributors. All rights reserved.
+// License: Dual licensed as BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
+//      or  GPL-2.0+ (http://opensource.org/licenses/gpl-license)
+//          with "runtime exception" (http://www.dune-project.org/license.html)
+// Authors:
+//   Felix Schindler (2016 - 2017)
+//   Rene Milk       (2016 - 2018)
+//   Tobias Leibner  (2016 - 2017)
+
+#ifndef DUNE_GDT_TIMESTEPPER_KINETIC_ADAPTIVE_RUNGEKUTTA_HH
+#define DUNE_GDT_TIMESTEPPER_KINETIC_ADAPTIVE_RUNGEKUTTA_HH
+
+#include <algorithm>
+#include <cmath>
+#include <limits>
+#include <utility>
+
+#include <dune/gdt/operators/interfaces.hh>
+
+#include <dune/xt/common/memory.hh>
+#include <dune/xt/common/string.hh>
+
+#include "adaptive-rungekutta.hh"
+
+
+namespace Dune {
+namespace GDT {
+
+
+/** \brief Time stepper using adaptive Runge Kutta methods
+ *
+ * Timestepper using adaptive Runge Kutta methods to solve equations of the form u_t = r * L(u, t) where u is a
+ * discrete function, L an operator acting on u and r a scalar factor (e.g. -1). In this kinetic variant, the evolved
+ * unknown is the vector of entropic coordinates alpha, and the entropy flux is used to recover the moments u.
+ * The specific Runge Kutta method can be chosen as the fourth template argument. If your desired Runge Kutta method is
+ * not contained in AdaptiveRungeKuttaMethods, choose AdaptiveRungeKuttaMethods::other and
+ * supply a DynamicMatrix< RangeFieldType > A and vectors b_1, b_2 (DynamicVector< RangeFieldType >) and c
+ * (DynamicVector< RangeFieldType >) in the constructor. Here, A, b_1, b_2 and c form the Butcher tableau (see
+ * https://en.wikipedia.org/wiki/List_of_Runge%E2%80%93Kutta_methods#Embedded_methods, A is composed of the coefficients
+ * a_{ij}, b_1 of b_j, b_2 of b_j^* and c of c_j). The default is the Dormand-Prince RK45 method.
+ * In each time step, the error is estimated using the difference between the two solutions obtained using either b_1 or
+ * b_2. If the estimated error is higher than a specified tolerance tol, the calculation is repeated with a smaller
+ * time step. The tolerance tol and the error estimate are also used to estimate the optimal time step length for the
+ * next time step via dt_new = dt_old*min(max(0.9*(tol/error)^(1/5), scale_factor_min), scale_factor_max).
+ *
+ * Notation: For an s-stage method,
+ * \mathbf{u}^{n+1} = \mathbf{u}^n + dt \sum_{i=0}^{s-1} b_i \mathbf{k}_i
+ * \mathbf{k}_i = L(\mathbf{u}_i, t^n + dt c_i)
+ * \mathbf{u}_i = \mathbf{u}^n + dt \sum_{j=0}^{i-1} a_{ij} \mathbf{k}_j,
+ *
+ * \tparam OperatorImp Type of operator L
+ * \tparam DiscreteFunctionImp Type of initial values and solution at a fixed time
+ * \tparam EntropyFluxImp Type of the entropy flux used to map between entropic coordinates alpha and moments u
+ * \tparam method Adaptive Runge-Kutta method that is used (default is TimeStepperMethods::dormand_prince)
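+ *
+ * A minimal usage sketch (type and variable names follow the entropic-coordinates MN test case and have to be
+ * adapted to the concrete setup):
+ * \code
+ * using TimeStepperType = KineticAdaptiveRungeKuttaTimeStepper<CombinedOperatorType,
+ *                                                              DiscreteFunctionType,
+ *                                                              EntropyFluxType,
+ *                                                              TimeStepperMethods::dormand_prince>;
+ * TimeStepperType timestepper(combined_operator, *analytical_flux, alpha, 1.);
+ * // remaining arguments as in TimeStepperInterface::solve
+ * timestepper.solve(t_end, initial_dt, num_save_steps, ...);
+ * \endcode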
+ */
+template <class OperatorImp,
+          class DiscreteFunctionImp,
+          class EntropyFluxImp,
+          TimeStepperMethods method = TimeStepperMethods::dormand_prince>
+class KineticAdaptiveRungeKuttaTimeStepper : public TimeStepperInterface<DiscreteFunctionImp>
+{
+  typedef TimeStepperInterface<DiscreteFunctionImp> BaseType;
+  typedef typename internal::AdaptiveButcherArrayProvider<typename BaseType::RangeFieldType, method>
+      ButcherArrayProviderType;
+
+public:
+  typedef OperatorImp OperatorType;
+  typedef DiscreteFunctionImp DiscreteFunctionType;
+
+  typedef typename DiscreteFunctionType::DomainFieldType DomainFieldType;
+  typedef typename DiscreteFunctionType::RangeFieldType RangeFieldType;
+  typedef typename Dune::DynamicMatrix<RangeFieldType> MatrixType;
+  typedef typename Dune::DynamicVector<RangeFieldType> VectorType;
+  typedef typename std::vector<std::pair<RangeFieldType, DiscreteFunctionType>> SolutionType;
+  using EntropyFluxType = EntropyFluxImp;
+  using BaseType::dimRange;
+
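+  /// \brief Computes the discrete function of moments u corresponding to the entropic coordinates alpha by evaluating
+  /// entropy_flux_.get_u on each grid element.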
+  DiscreteFunctionType get_u(const DiscreteFunctionType& alpha)
+  {
+    DiscreteFunctionType u(alpha.space());
+    const auto local_u = u.local_discrete_function();
+    const auto local_alpha = alpha.local_discrete_function();
+    XT::Common::FieldVector<RangeFieldType, dimRange> local_alpha_vec;
+    XT::Common::FieldVector<RangeFieldType, dimRange> local_u_vec;
+    for (auto&& entity : Dune::elements(alpha.space().grid_view())) {
+      local_alpha->bind(entity);
+      local_u->bind(entity);
+      for (size_t ii = 0; ii < dimRange; ++ii)
+        local_alpha_vec[ii] = local_alpha->dofs().get_entry(ii);
+      local_u_vec = entropy_flux_.get_u(local_alpha_vec);
+      for (size_t ii = 0; ii < dimRange; ++ii)
+        local_u->dofs().set_entry(ii, local_u_vec[ii]);
+    }
+    return u;
+  }
+
+  /**
+   * \brief Constructor for KineticAdaptiveRungeKuttaTimeStepper time stepper
+   * \param op Operator L
+   * \param entropy_flux Entropy flux used to compute the moments u from the entropic coordinates alpha
+   * \param initial_values Discrete function containing the initial entropic coordinates alpha at time t_0.
+   * \param r Scalar factor (see above, default is 1)
+   * \param t_0 Initial time (default is 0)
+   * \param tol Error tolerance for the adaptive scheme (default is 1e-4)
+   * \param scale_factor_min Minimum allowed factor for time step scaling (default is 0.2).
+   * \param scale_factor_max Maximum allowed factor for time step scaling (default is 5).
+   * \param A Coefficient matrix (only provide if you use AdaptiveRungeKuttaMethods::other)
+   * \param b_1 First set of coefficients (only provide if you use AdaptiveRungeKuttaMethods::other)
+   * \param b_2 Second set of coefficients (only provide if you use AdaptiveRungeKuttaMethods::other)
+   * \param c Coefficients for time steps (only provide if you use AdaptiveRungeKuttaMethods::other)
+   */
+  KineticAdaptiveRungeKuttaTimeStepper(const OperatorType& op,
+                                       const EntropyFluxType& entropy_flux,
+                                       DiscreteFunctionType& initial_values,
+                                       const RangeFieldType r = 1.0,
+                                       const double t_0 = 0.0,
+                                       const RangeFieldType tol = 1e-4,
+                                       const RangeFieldType scale_factor_min = 0.2,
+                                       const RangeFieldType scale_factor_max = 5,
+                                       const MatrixType& A = ButcherArrayProviderType::A(),
+                                       const VectorType& b_1 = ButcherArrayProviderType::b_1(),
+                                       const VectorType& b_2 = ButcherArrayProviderType::b_2(),
+                                       const VectorType& c = ButcherArrayProviderType::c())
+    : BaseType(t_0, initial_values)
+    , op_(op)
+    , entropy_flux_(entropy_flux)
+    , r_(r)
+    , tol_(tol)
+    , scale_factor_min_(scale_factor_min)
+    , scale_factor_max_(scale_factor_max)
+    , alpha_tmp_(BaseType::current_solution())
+    , A_(A)
+    , b_1_(b_1)
+    , b_2_(b_2)
+    , c_(c)
+    , b_diff_(b_2_ - b_1_)
+    , num_stages_(A_.rows())
+  {
+    assert(Dune::XT::Common::FloatCmp::gt(tol_, 0.0));
+    assert(Dune::XT::Common::FloatCmp::le(scale_factor_min_, 1.0));
+    assert(Dune::XT::Common::FloatCmp::ge(scale_factor_max_, 1.0));
+    assert(A_.rows() == A_.cols() && "A has to be a square matrix");
+    assert(b_1_.size() == A_.rows());
+    assert(b_2_.size() == A_.rows());
+    assert(c_.size() == A_.rows());
+#ifndef NDEBUG
+    for (size_t ii = 0; ii < A_.rows(); ++ii) {
+      for (size_t jj = ii; jj < A_.cols(); ++jj) {
+        assert(Dune::XT::Common::FloatCmp::eq(A_[ii][jj], 0.0)
+               && "A has to be a lower triangular matrix with 0 on the main diagonal");
+      }
+    }
+#endif // NDEBUG
+    // store as many discrete functions as needed for intermediate stages
+    for (size_t ii = 0; ii < num_stages_; ++ii) {
+      stages_k_.emplace_back(current_solution());
+    }
+  } // constructor KineticAdaptiveRungeKuttaTimeStepper
+
+  using BaseType::current_solution;
+  using BaseType::current_time;
+  using BaseType::solve;
+
+  virtual RangeFieldType solve(const RangeFieldType t_end,
+                               const RangeFieldType initial_dt,
+                               const size_t num_save_steps,
+                               const size_t num_output_steps,
+                               const bool save_solution,
+                               const bool visualize,
+                               const bool write_discrete,
+                               const bool write_exact,
+                               const std::string prefix,
+                               typename BaseType::DiscreteSolutionType& sol,
+                               const typename BaseType::VisualizerType& visualizer,
+                               const typename BaseType::StringifierType& stringifier,
+                               const typename BaseType::GridFunctionType& exact_solution) override final
+  {
+    const auto ret = BaseType::solve(t_end,
+                                     initial_dt,
+                                     num_save_steps,
+                                     num_output_steps,
+                                     save_solution,
+                                     visualize,
+                                     write_discrete,
+                                     write_exact,
+                                     prefix,
+                                     sol,
+                                     visualizer,
+                                     stringifier,
+                                     exact_solution);
+    // in a fractional step scheme, we cannot use last_stage_of_previous_step
+    last_stage_of_previous_step_ = nullptr;
+    return ret;
+  }
+
+  RangeFieldType step(const RangeFieldType dt, const RangeFieldType max_dt) override final
+  {
+    RangeFieldType actual_dt = std::min(dt, max_dt);
+    RangeFieldType mixed_error = std::numeric_limits<RangeFieldType>::max();
+    RangeFieldType time_step_scale_factor = 1.0;
+    const std::vector<RangeFieldType> r_sequence = {0, 1e-8, 1e-6, 1e-4, 1e-3, 1e-2, 5e-2, 0.1, 0.5, 1};
+    auto r_it = r_sequence.begin();
+
+    auto& t = current_time();
+    auto& alpha_n = current_solution();
+
+    while (Dune::XT::Common::FloatCmp::gt(mixed_error, tol_)) {
+      last_stage_of_previous_step_ = nullptr;
+      bool skip_error_computation = false;
+      actual_dt *= time_step_scale_factor;
+      size_t first_stage_to_compute = 0;
+      if (last_stage_of_previous_step_) {
+        stages_k_[0].dofs().vector() = last_stage_of_previous_step_->dofs().vector();
+        first_stage_to_compute = 1;
+      }
+      // std::cout << "t: " << t << ", dt: " << actual_dt << std::endl;
+
+      const bool consider_regularization = actual_dt < 1e-5;
+      for (size_t ii = first_stage_to_compute; ii < num_stages_; ++ii) {
+        std::fill(stages_k_[ii].dofs().vector().begin(), stages_k_[ii].dofs().vector().end(), RangeFieldType(0.));
+        alpha_tmp_.dofs().vector() = alpha_n.dofs().vector();
+        for (size_t jj = 0; jj < ii; ++jj)
+          alpha_tmp_.dofs().vector() += stages_k_[jj].dofs().vector() * (actual_dt * r_ * (A_[ii][jj]));
+        try {
+          op_.apply(alpha_tmp_.dofs().vector(),
+                    stages_k_[ii].dofs().vector(),
+                    XT::Common::Parameter(
+                        {{"t", {t + actual_dt * c_[ii]}}, {"reg", {static_cast<double>(consider_regularization)}}}));
+          const auto& reg_indicators = op_.reg_indicators();
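+          // If the inverse Hessian application failed on some entities (flagged via reg_indicators), regularize alpha
+          // on these entities by blending it with the isotropic alpha_iso of the same density, shift it along
+          // alpha_one so the density matches the old density again, and abort this attempt so the step is redone
+          // with the regularized alpha and a smaller dt.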
+          if (consider_regularization
+              && std::any_of(reg_indicators.begin(), reg_indicators.end(), [](const bool& a) { return a; })) {
+            const auto& grid_view = alpha_n.space().grid_view();
+            const auto local_alpha = alpha_n.local_discrete_function();
+            const auto& basis_functions = entropy_flux_.basis_functions();
+            const auto alpha_one = basis_functions.alpha_one();
+            XT::Common::FieldVector<RangeFieldType, dimRange> local_alpha_vec;
+            if (++r_it == r_sequence.end())
+              DUNE_THROW(Dune::InvalidStateException, "Fully regularized, still fails!");
+            const auto r = *r_it;
+            for (auto&& entity : Dune::elements(grid_view)) {
+              const auto entity_index = grid_view.indexSet().index(entity);
+              if (reg_indicators[entity_index]) {
+                std::cout << "Regularized on entity " << entity_index << " with r = " << r << std::endl;
+                local_alpha->bind(entity);
+                for (size_t jj = 0; jj < dimRange; ++jj)
+                  local_alpha_vec[jj] = local_alpha->dofs().get_entry(jj);
+                const auto old_density = basis_functions.density(entropy_flux_.get_u(local_alpha_vec));
+                const auto alpha_iso = basis_functions.alpha_iso(old_density);
+                for (size_t jj = 0; jj < dimRange; ++jj)
+                  local_alpha_vec[jj] = (1 - r) * local_alpha_vec[jj] + r * alpha_iso[jj];
+                const auto reg_density = basis_functions.density(entropy_flux_.get_u(local_alpha_vec));
+                const auto factor = std::log(old_density / reg_density);
+                for (size_t jj = 0; jj < dimRange; ++jj) {
+                  local_alpha_vec[jj] += alpha_one[jj] * factor;
+                  local_alpha->dofs().set_entry(jj, local_alpha_vec[jj]);
+                }
+                const auto new_density = basis_functions.density(entropy_flux_.get_u(local_alpha_vec));
+                std::cout << "density: " << old_density << ", new_density: " << new_density << std::endl;
+              }
+            }
+            DUNE_THROW(Dune::MathError, "Regularization done!");
+          }
+        } catch (const Dune::MathError& e) {
+          mixed_error = 1e10;
+          skip_error_computation = true;
+          time_step_scale_factor = 0.5;
+          std::cout << "MathError! " << e.what() << std::endl;
+          // std::cout << XT::Common::to_string(alpha_n.dofs().vector()) << std::endl;
+          // for (size_t jj = 0; jj < ii; ++jj)
+          //   std::cout << XT::Common::to_string(stages_k_[jj].dofs().vector()) << std::endl;
+          break;
+#if HAVE_TBB
+        } catch (const tbb::captured_exception& e) {
+          mixed_error = 1e10;
+          skip_error_computation = true;
+          time_step_scale_factor = 0.5;
+          std::cout << "MathError! " << e.what() << std::endl;
+          // std::cout << XT::Common::to_string(alpha_n.dofs().vector()) << std::endl;
+          // for (size_t jj = 0; jj < ii; ++jj)
+          // std::cout << XT::Common::to_string(stages_k_[jj].dofs().vector()) << std::endl;
+          break;
+#endif
+        }
+      }
+
+      if (!skip_error_computation) {
+
+        alpha_tmp_.dofs().vector() = alpha_n.dofs().vector();
+
+        // calculate alpha at timestep n+1
+        for (size_t ii = 0; ii < num_stages_; ++ii)
+          alpha_n.dofs().vector() += stages_k_[ii].dofs().vector() * (actual_dt * r_ * b_1_[ii]);
+        const auto u_1 = get_u(alpha_n);
+
+        // compute alternative alpha
+        for (size_t ii = 0; ii < num_stages_; ++ii)
+          alpha_tmp_.dofs().vector() += stages_k_[ii].dofs().vector() * (actual_dt * r_ * b_2_[ii]);
+        const auto u_2 = get_u(alpha_tmp_);
+
+        // scale the error: use the absolute error where |u_i| <= 0.01 and the relative error otherwise
+        auto diff_vector = u_1.dofs().vector() - u_2.dofs().vector();
+        for (size_t ii = 0; ii < diff_vector.size(); ++ii) {
+          if (std::abs(u_1.dofs().vector()[ii]) > 0.01)
+            diff_vector[ii] /= std::abs(u_1.dofs().vector()[ii]);
+        }
+        mixed_error = diff_vector.sup_norm();
+
+        // compute error vector in alpha
+        diff_vector = alpha_n.dofs().vector() - alpha_tmp_.dofs().vector();
+
+        // scale the error: use the absolute error where |alpha_i| <= 0.01 and the relative error otherwise
+        for (size_t ii = 0; ii < diff_vector.size(); ++ii) {
+          if (std::abs(alpha_n.dofs().vector()[ii]) > 0.01)
+            diff_vector[ii] /= std::abs(alpha_n.dofs().vector()[ii]);
+        }
+        mixed_error = std::max(mixed_error, diff_vector.sup_norm());
+        if (std::isnan(mixed_error))
+          mixed_error = 1e10;
+
+        // scale dt to get the estimated optimal time step length, TODO: adapt formula
+        time_step_scale_factor =
+            std::min(std::max(0.9 * std::pow(tol_ / mixed_error, 1.0 / 5.0), scale_factor_min_), scale_factor_max_);
+
+        if (mixed_error > tol_) { // revert alpha from timestep n+1 back to timestep n
+          for (size_t ii = 0; ii < num_stages_; ++ii)
+            alpha_n.dofs().vector() += stages_k_[ii].dofs().vector() * (-1.0 * r_ * actual_dt * b_1_[ii]);
+        }
+      }
+    } // while (mixed_error > tol_)
+    // if (!last_stage_of_previous_step_)
+    // last_stage_of_previous_step_ = Dune::XT::Common::make_unique<DiscreteFunctionType>(alpha_n);
+    // last_stage_of_previous_step_->dofs().vector() = stages_k_[num_stages_ - 1].dofs().vector();
+
+#if 0
+    const auto alpha_local_func = alpha_n.local_discrete_function();
+    for (auto&& element : Dune::elements(alpha_n.space().grid_view())) {
+      alpha_local_func->bind(element);
+      for (size_t ii = 0; ii < BaseType::dimRange; ++ii) {
+//        constexpr double min_val = -100.;
+        constexpr double max_val = 1000.;
+        const auto entry_ii = alpha_local_func->dofs().get_entry(ii);
+        if (std::abs(entry_ii) > max_val) {
+//          std::cout << "limited from " << alpha_local_func->dofs().get_entry(ii) << " to " << min_val << " in entity " << element.geometry().center() << std::endl;
+//          std::cout << "Entries are: ";
+//          for (size_t jj = 0; jj < BaseType::dimRange; ++jj) {
+//            std::cout << alpha_local_func->dofs().get_entry(jj) << " ";
+//          }
+//          std::cout << std::endl;
+          last_stage_of_previous_step_ = nullptr;
+//          alpha_local_func->dofs().set_entry(ii, min_val);
+          std::cout << "Replacing " << entry_ii << " by " << std::copysign(max_val, entry_ii) << std::endl;
+          alpha_local_func->dofs().set_entry(ii, std::copysign(max_val, entry_ii));
+        }
+      } // ii
+    } // elements
+#endif
+
+    t += actual_dt;
+
+    return actual_dt * time_step_scale_factor;
+  } // ... step(...)
+
+private:
+  const OperatorType& op_;
+  const EntropyFluxType& entropy_flux_;
+  const RangeFieldType r_;
+  const RangeFieldType tol_;
+  const RangeFieldType scale_factor_min_;
+  const RangeFieldType scale_factor_max_;
+  DiscreteFunctionType alpha_tmp_;
+  const MatrixType A_;
+  const VectorType b_1_;
+  const VectorType b_2_;
+  const VectorType c_;
+  const VectorType b_diff_;
+  std::vector<DiscreteFunctionType> stages_k_;
+  const size_t num_stages_;
+  std::unique_ptr<DiscreteFunctionType> last_stage_of_previous_step_;
+}; // class KineticAdaptiveRungeKuttaTimeStepper
+
+
+} // namespace GDT
+} // namespace Dune
+
+#endif // DUNE_GDT_TIMESTEPPER_KINETIC_ADAPTIVE_RUNGEKUTTA_HH