diff --git a/CMakeLists.txt b/CMakeLists.txt index b0506a0f563d138634ca7c1e0317be6f9596253a..8c2a45c88f68ba42c403f0a68f6d4ef24eb26691 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -25,38 +25,19 @@ include(DuneMacros) find_package(dune-xt-common REQUIRED) list(APPEND CMAKE_MODULE_PATH "${dune-xt-common_MODULE_PATH}") -include(DuneUtils) # start a dune project with information from dune.module dune_project() dune_enable_all_packages(INCLUDE_DIRS ${dune-xt-la_SOURCE_DIR}/dune - MODULE_LIBRARIES dunextla - VERBOSE) - - -# add header of this module for header listing -file(GLOB_RECURSE xtlaheader "${CMAKE_CURRENT_SOURCE_DIR}/dune/*.hh") -set(COMMON_HEADER ${xtlaheader} ${DUNE_HEADERS}) + MODULE_LIBRARIES dunextla) -# add header of dependent modules for header listing -foreach(_mod ${ALL_DEPENDENCIES}) - file(GLOB_RECURSE HEADER_LIST "${CMAKE_CURRENT_SOURCE_DIR}/../${_mod}/*.hh") - list(APPEND COMMON_HEADER ${HEADER_LIST}) -endforeach(_mod DEPENDENCIES) -set_source_files_properties(${COMMON_HEADER} PROPERTIES HEADER_FILE_ONLY 1) - -#disable most warnings from dependent modules -foreach(_mod ${ALL_DEPENDENCIES}) - dune_module_to_uppercase(_upper_case "${_mod}") - if(${_mod}_INCLUDE_DIRS) - foreach( _idir ${${_mod}_INCLUDE_DIRS} ) - add_definitions("-isystem ${_idir}") - endforeach( _idir ) - endif(${_mod}_INCLUDE_DIRS) -endforeach(_mod DEPENDENCIES) +include(DuneUtils) add_subdirectory(dune) add_subdirectory(doc) + +add_header_listing() +make_dependent_modules_sys_included() add_format(${CMAKE_CURRENT_SOURCE_DIR}) add_tidy(${CMAKE_CURRENT_SOURCE_DIR}) diff --git a/dune/CMakeLists.txt b/dune/CMakeLists.txt index 3dfcb847475bd7f90b48bb0471b97fa8cf005bbb..4f85d8fc09e2edc2cf09dfe42ef6f74f83f1fbe2 100644 --- a/dune/CMakeLists.txt +++ b/dune/CMakeLists.txt @@ -6,3 +6,7 @@ # Felix Schindler (2016) add_subdirectory(xt) + +install(DIRECTORY xt + DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/dune/ + FILES_MATCHING PATTERN "*.hh") diff --git a/dune/xt/la/container/common.hh 
b/dune/xt/la/container/common.hh index eb86ef74f9055ecc91b9408f0486032c22ac051f..2ab3dee26572bc6cfd61927998d6048c2ed2c8c6 100644 --- a/dune/xt/la/container/common.hh +++ b/dune/xt/la/container/common.hh @@ -19,12 +19,14 @@ #include <type_traits> #include <vector> #include <complex> +#include <mutex> #include <dune/common/dynvector.hh> #include <dune/common/dynmatrix.hh> #include <dune/common/densematrix.hh> #include <dune/common/float_cmp.hh> #include <dune/common/ftraits.hh> +#include <dune/common/unused.hh> #include <dune/xt/common/exceptions.hh> @@ -120,7 +122,10 @@ public: } } // CommonDenseVector(...) - CommonDenseVector(const ThisType& other) = default; + CommonDenseVector(const ThisType& other) + : backend_(other.backend_) + { + } explicit CommonDenseVector(const BackendType& other, const bool /*prune*/ = false, @@ -151,6 +156,7 @@ public: ThisType& operator=(const ScalarType& value) { ensure_uniqueness(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); for (auto& element : *this) element = value; return *this; @@ -161,6 +167,7 @@ public: */ ThisType& operator=(const BackendType& other) { + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); backend_ = std::make_shared<BackendType>(other); return *this; } @@ -172,13 +179,12 @@ public: { ensure_uniqueness(); return *backend_; - } // ... backend(...) + } const BackendType& backend() const { - ensure_uniqueness(); return *backend_; - } // ... backend(...) + } /// \} /// \name Required by ProvidesDataAccess. @@ -195,24 +201,26 @@ public: ThisType copy() const { + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); return ThisType(*backend_); } void scal(const ScalarType& alpha) { - backend() *= alpha; - } // ... scal(...) 
+ auto& backend_ref = backend(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); + backend_ref *= alpha; + } void axpy(const ScalarType& alpha, const ThisType& xx) { + auto& backend_ref = backend(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); if (xx.size() != size()) DUNE_THROW(Common::Exceptions::shapes_do_not_match, "The size of x (" << xx.size() << ") does not match the size of this (" << size() << ")!"); - ensure_uniqueness(); - auto& this_ref = *backend_; - const auto& xx_ref = *(xx.backend_); - for (size_t ii = 0; ii < this_ref.size(); ++ii) - this_ref[ii] += alpha * xx_ref[ii]; + for (size_t ii = 0; ii < backend_ref.size(); ++ii) + backend_ref[ii] += alpha * xx.backend()[ii]; } // ... axpy(...) bool has_equal_shape(const ThisType& other) const @@ -231,27 +239,30 @@ public: void add_to_entry(const size_t ii, const ScalarType& value) { + auto& backend_ref = backend(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); assert(ii < size()); - ensure_uniqueness(); - backend_->operator[](ii) += value; - } // ... add_to_entry(...) + backend_ref[ii] += value; + } void set_entry(const size_t ii, const ScalarType& value) { + auto& backend_ref = backend(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); assert(ii < size()); - ensure_uniqueness(); - backend_->operator[](ii) = value; - } // ... set_entry(...) + backend_ref[ii] = value; + } ScalarType get_entry(const size_t ii) const { assert(ii < size()); - return backend_->operator[](ii); - } // ... get_entry(...) 
+ return backend()[ii]; + } +protected: inline ScalarType& get_entry_ref(const size_t ii) { - return backend()[ii]; + return backend_->operator[](ii); } inline const ScalarType& get_entry_ref(const size_t ii) const @@ -259,14 +270,15 @@ public: return backend_->operator[](ii); } +public: inline ScalarType& operator[](const size_t ii) { - return get_entry_ref(ii); + return backend()[ii]; } inline const ScalarType& operator[](const size_t ii) const { - return get_entry_ref(ii); + return backend()[ii]; } /// \} @@ -275,94 +287,72 @@ public: virtual ScalarType dot(const ThisType& other) const override final { + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); if (other.size() != size()) DUNE_THROW(Common::Exceptions::shapes_do_not_match, "The size of other (" << other.size() << ") does not match the size of this (" << size() << ")!"); - return backend_->operator*(*(other.backend_)); + return backend() * other.backend(); } // ... dot(...) virtual RealType l1_norm() const override final { - return backend_->one_norm(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); + return backend().one_norm(); } virtual RealType l2_norm() const override final { - return backend_->two_norm(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); + return backend().two_norm(); } virtual RealType sup_norm() const override final { - return backend_->infinity_norm(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); + return backend().infinity_norm(); } - virtual void add(const ThisType& other, ThisType& result) const override final - { - if (other.size() != size()) - DUNE_THROW(Common::Exceptions::shapes_do_not_match, - "The size of other (" << other.size() << ") does not match the size of this (" << size() << ")!"); - if (result.size() != size()) - DUNE_THROW(Common::Exceptions::shapes_do_not_match, - "The size of result (" << result.size() << ") does not match the size of this (" << size() << ")!"); - BackendType& result_ref = result.backend(); - for (size_t ii 
= 0; ii < size(); ++ii) - result_ref[ii] = backend_->operator[](ii) + other.backend_->operator[](ii); - } // ... add(...) - virtual void iadd(const ThisType& other) override final { + auto& backend_ref = backend(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); if (other.size() != size()) DUNE_THROW(Common::Exceptions::shapes_do_not_match, "The size of other (" << other.size() << ") does not match the size of this (" << size() << ")!"); - backend() += *(other.backend_); + backend_ref += other.backend(); } // ... iadd(...) - virtual void sub(const ThisType& other, ThisType& result) const override final - { - if (other.size() != size()) - DUNE_THROW(Common::Exceptions::shapes_do_not_match, - "The size of other (" << other.size() << ") does not match the size of this (" << size() << ")!"); - if (result.size() != size()) - DUNE_THROW(Common::Exceptions::shapes_do_not_match, - "The size of result (" << result.size() << ") does not match the size of this (" << size() << ")!"); - BackendType& result_ref = result.backend(); - for (size_t ii = 0; ii < size(); ++ii) - result_ref[ii] = backend_->operator[](ii) - other.backend_->operator[](ii); - } // ... sub(...) - virtual void isub(const ThisType& other) override final { + auto& backend_ref = backend(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); if (other.size() != size()) DUNE_THROW(Common::Exceptions::shapes_do_not_match, "The size of other (" << other.size() << ") does not match the size of this (" << size() << ")!"); - backend() -= *(other.backend_); + backend_ref -= other.backend(); } // ... isub(...) - /// \} - /// \name Imported from VectorInterface. 
- /// \{ - - using VectorInterfaceType::add; - using VectorInterfaceType::sub; - - /// \} - -private: +protected: /** * \see ContainerInterface */ - inline void ensure_uniqueness() const + inline void ensure_uniqueness() { - if (!backend_.unique()) + if (!backend_.unique()) { + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); backend_ = std::make_shared<BackendType>(*backend_); + } } // ... ensure_uniqueness(...) +private: friend class VectorInterface<internal::CommonDenseVectorTraits<ScalarType>, ScalarType>; friend class CommonDenseMatrix<ScalarType>; - mutable std::shared_ptr<BackendType> backend_; + std::shared_ptr<BackendType> backend_; + mutable std::mutex mutex_; }; // class CommonDenseVector + /** * \brief A dense matrix implementation of MatrixInterface using the Dune::DynamicMatrix. */ @@ -433,6 +423,7 @@ public: ThisType& operator=(const ThisType& other) { + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); backend_ = other.backend_; return *this; } @@ -442,6 +433,7 @@ public: */ ThisType& operator=(const BackendType& other) { + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); backend_ = std::make_shared<BackendType>(other); return *this; } @@ -457,7 +449,6 @@ public: const BackendType& backend() const { - ensure_uniqueness(); return *backend_; } @@ -467,16 +458,21 @@ public: ThisType copy() const { + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); return ThisType(*backend_); } void scal(const ScalarType& alpha) { - backend() *= alpha; + auto& backend_ref = backend(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); + backend_ref *= alpha; } void axpy(const ScalarType& alpha, const ThisType& xx) { + auto& backend_ref = backend(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); if (!has_equal_shape(xx)) DUNE_THROW(Common::Exceptions::shapes_do_not_match, "The shape of xx (" << xx.rows() << "x" << xx.cols() << ") does not match the shape of this (" @@ -484,7 +480,7 @@ public: << "x" << cols() << ")!"); - 
backend().axpy(alpha, *(xx.backend_)); + backend_ref.axpy(alpha, xx.backend()); } // ... axpy(...) bool has_equal_shape(const ThisType& other) const @@ -498,12 +494,12 @@ public: inline size_t rows() const { - return backend_->rows(); + return backend().rows(); } inline size_t cols() const { - return backend_->cols(); + return backend().cols(); } inline void mv(const VectorInterface<internal::CommonDenseVectorTraits<ScalarType>, ScalarType>& xx, @@ -514,57 +510,67 @@ public: inline void mv(const CommonDenseVector<ScalarType>& xx, CommonDenseVector<ScalarType>& yy) const { - backend_->mv(*(xx.backend_), yy.backend()); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); + backend().mv(xx.backend(), yy.backend()); } void add_to_entry(const size_t ii, const size_t jj, const ScalarType& value) { + auto& backend_ref = backend(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); assert(ii < rows()); assert(jj < cols()); - backend()[ii][jj] += value; + backend_ref[ii][jj] += value; } // ... add_to_entry(...) void set_entry(const size_t ii, const size_t jj, const ScalarType& value) { + auto& backend_ref = backend(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); assert(ii < rows()); assert(jj < cols()); - backend()[ii][jj] = value; + backend_ref[ii][jj] = value; } // ... set_entry(...) ScalarType get_entry(const size_t ii, const size_t jj) const { assert(ii < rows()); assert(jj < cols()); - return backend_->operator[](ii)[jj]; + return backend()[ii][jj]; } // ... get_entry(...) void clear_row(const size_t ii) { + auto& backend_ref = backend(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); if (ii >= rows()) DUNE_THROW(Common::Exceptions::index_out_of_range, "Given ii (" << ii << ") is larger than the rows of this (" << rows() << ")!"); - backend()[ii] *= ScalarType(0); + backend_ref[ii] *= ScalarType(0); } // ... clear_row(...) 
void clear_col(const size_t jj) { + auto& backend_ref = backend(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); if (jj >= cols()) DUNE_THROW(Common::Exceptions::index_out_of_range, "Given jj (" << jj << ") is larger than the cols of this (" << cols() << ")!"); - BackendType& backend_ref = backend(); for (size_t ii = 0; ii < rows(); ++ii) backend_ref[ii][jj] = ScalarType(0); } // ... clear_col(...) void unit_row(const size_t ii) { + auto& backend_ref = backend(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); if (ii >= cols()) DUNE_THROW(Common::Exceptions::index_out_of_range, "Given ii (" << ii << ") is larger than the cols of this (" << cols() << ")!"); if (ii >= rows()) DUNE_THROW(Common::Exceptions::index_out_of_range, "Given ii (" << ii << ") is larger than the rows of this (" << rows() << ")!"); - auto& row = backend()[ii]; + auto& row = backend_ref[ii]; for (size_t jj = 0; jj < cols(); ++jj) row[jj] = ScalarType(0); row[ii] = ScalarType(1); @@ -572,22 +578,24 @@ public: void unit_col(const size_t jj) { + auto& backend_ref = backend(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); if (jj >= cols()) DUNE_THROW(Common::Exceptions::index_out_of_range, "Given jj (" << jj << ") is larger than the cols of this (" << cols() << ")!"); if (jj >= rows()) DUNE_THROW(Common::Exceptions::index_out_of_range, "Given jj (" << jj << ") is larger than the rows of this (" << rows() << ")!"); - ensure_uniqueness(); for (size_t ii = 0; ii < rows(); ++ii) - backend_->operator[](ii)[jj] = ScalarType(0); - backend_->operator[](jj)[jj] = ScalarType(1); + backend_ref[ii][jj] = ScalarType(0); + backend_ref[jj][jj] = ScalarType(1); } // ... unit_col(...) 
bool valid() const { + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); for (size_t ii = 0; ii < rows(); ++ii) { - const auto& row_vec = backend_->operator[](ii); + const auto& row_vec = backend()[ii]; for (size_t jj = 0; jj < cols(); ++jj) { const auto& entry = row_vec[jj]; if (Common::isnan(entry) || Common::isinf(entry)) @@ -599,17 +607,21 @@ public: /// \} -private: +protected: /** * \see ContainerInterface */ - inline void ensure_uniqueness() const + inline void ensure_uniqueness() { - if (!backend_.unique()) + if (!backend_.unique()) { + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); backend_ = std::make_shared<BackendType>(*backend_); + } } // ... ensure_uniqueness(...) - mutable std::shared_ptr<BackendType> backend_; +private: + std::shared_ptr<BackendType> backend_; + mutable std::mutex mutex_; }; // class CommonDenseMatrix /** @@ -629,7 +641,6 @@ public: typedef typename Traits::RealType RealType; typedef std::vector<size_t> IndexVectorType; - /** * \brief This is the constructor of interest which creates a sparse matrix. 
*/ @@ -726,6 +737,7 @@ public: template <int ROWS, int COLS> explicit operator Dune::FieldMatrix<ScalarType, ROWS, COLS>() const { + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); assert(ROWS == num_rows_ && COLS == num_cols_); Dune::FieldMatrix<ScalarType, ROWS, COLS> ret(ScalarType(0)); for (size_t rr = 0; rr < ROWS; ++rr) @@ -772,14 +784,16 @@ public: inline void scal(const ScalarType& alpha) { ensure_uniqueness(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); std::transform( backend_->begin(), backend_->end(), backend_->begin(), std::bind1st(std::multiplies<ScalarType>(), alpha)); } inline void axpy(const ScalarType& alpha, const ThisType& xx) { - assert(has_equal_shape(xx)); ensure_uniqueness(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); + assert(has_equal_shape(xx)); const auto& xx_backend = xx.backend(); for (size_t ii = 0; ii < backend_->size(); ++ii) backend_->operator[](ii) += alpha * xx_backend[ii]; @@ -807,6 +821,7 @@ public: template <class XX, class YY> inline void mv(const XX& xx, YY& yy) const { + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); std::fill(yy.begin(), yy.end(), ScalarType(0)); for (size_t rr = 0; rr < num_rows_; ++rr) for (size_t kk = row_pointers_->operator[](rr); kk < row_pointers_->operator[](rr + 1); ++kk) @@ -816,6 +831,7 @@ public: inline void add_to_entry(const size_t rr, const size_t cc, const ScalarType& value) { ensure_uniqueness(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); const size_t index = get_index_in_backend(rr, cc); assert(index != size_t(-1) && "Entry has to be in the sparsity pattern!"); backend_->operator[](index) += value; @@ -823,6 +839,7 @@ public: inline ScalarType get_entry(const size_t rr, const size_t cc) const { + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); const size_t index = get_index_in_backend(rr, cc); return index == size_t(-1) ? 
ScalarType(0) : backend_->operator[](index); } @@ -830,6 +847,7 @@ public: inline void set_entry(const size_t rr, const size_t cc, const ScalarType value) { ensure_uniqueness(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); const size_t index = get_index_in_backend(rr, cc); assert(index != size_t(-1) && "Entry has to be in the sparsity pattern!"); backend_->operator[](index) = value; @@ -838,6 +856,7 @@ public: inline void clear_row(const size_t rr) { ensure_uniqueness(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); std::fill(backend_->begin() + row_pointers_->operator[](rr), backend_->begin() + row_pointers_->operator[](rr + 1), ScalarType(0)); @@ -846,6 +865,7 @@ public: inline void clear_col(const size_t cc) { ensure_uniqueness(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); for (size_t kk = 0; kk < backend_->size(); ++kk) { if (column_indices_->operator[](kk) == cc) backend_->operator[](kk) = ScalarType(0); @@ -868,6 +888,7 @@ public: bool valid() const { + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); // iterate over non-zero entries for (const auto& entry : *backend_) if (XT::Common::isnan(std::real(entry)) || XT::Common::isnan(std::imag(entry)) @@ -885,6 +906,7 @@ public: const typename Common::FloatCmp::DefaultEpsilon<ScalarType>::Type eps = Common::FloatCmp::DefaultEpsilon<ScalarType>::value()) const override { + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); SparsityPatternDefault ret(num_rows_); for (size_t rr = 0; rr < num_rows_; ++rr) { for (size_t kk = row_pointers_->operator[](rr + 1); kk < row_pointers_->operator[](rr + 1); ++kk) { @@ -901,6 +923,7 @@ public: private: size_t get_index_in_backend(const size_t rr, const size_t cc) const { + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); const auto& row_offset = row_pointers_->operator[](rr); auto column_indices_iterator = column_indices_->begin(); column_indices_iterator += row_offset; @@ -910,16 +933,21 @@ private: return size_t(-1); } - 
inline void ensure_uniqueness() const +protected: + inline void ensure_uniqueness() { - if (!backend_.unique()) + if (!backend_.unique()) { + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); backend_ = std::make_shared<BackendType>(*backend_); + } } // ... ensure_uniqueness(...) +private: size_t num_rows_, num_cols_; - mutable std::shared_ptr<BackendType> backend_; + std::shared_ptr<BackendType> backend_; std::shared_ptr<IndexVectorType> row_pointers_; std::shared_ptr<IndexVectorType> column_indices_; + mutable std::mutex mutex_; }; // class CommonSparseMatrix } // namespace LA diff --git a/dune/xt/la/container/container-interface.hh b/dune/xt/la/container/container-interface.hh index 97af127ad72283bfdaa688b29f279edd3b912655..80279d07196b1183893fa3983f8ccb49c3552056 100644 --- a/dune/xt/la/container/container-interface.hh +++ b/dune/xt/la/container/container-interface.hh @@ -176,6 +176,13 @@ public: return this->as_imp().has_equal_shape(other); } +protected: + inline void ensure_uniqueness() + { + CHECK_AND_CALL_CRTP(this->as_imp().ensure_uniqueness()); + } + +public: /// \} /// \name Are provided by the interface for convenience! /// \note Those marked as virtual may be implemented more efficiently in a derived class! 
diff --git a/dune/xt/la/container/eigen/base.hh b/dune/xt/la/container/eigen/base.hh index c63cca1c9facca5415695bc7edc985f95a00d938..32f48b331a25347415c029a134aac54f343c534a 100644 --- a/dune/xt/la/container/eigen/base.hh +++ b/dune/xt/la/container/eigen/base.hh @@ -17,6 +17,7 @@ #include <type_traits> #include <vector> #include <complex> +#include <mutex> #include <dune/xt/common/disable_warnings.hh> #if HAVE_EIGEN @@ -26,6 +27,7 @@ #include <dune/common/typetraits.hh> #include <dune/common/ftraits.hh> +#include <dune/common/unused.hh> #include <dune/xt/common/exceptions.hh> #include <dune/xt/common/crtp.hh> @@ -90,7 +92,6 @@ public: const BackendType& backend() const { - ensure_uniqueness(); return *backend_; } @@ -100,21 +101,26 @@ public: VectorImpType copy() const { + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); return VectorImpType(*backend_); } void scal(const ScalarType& alpha) { - backend() *= alpha; + auto& backend_ref = backend(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); + backend_ref *= alpha; } template <class T> void axpy(const ScalarType& alpha, const EigenBaseVector<T, ScalarType>& xx) { + auto& backend_ref = backend(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); if (xx.size() != size()) DUNE_THROW(Common::Exceptions::shapes_do_not_match, "The size of xx (" << xx.size() << ") does not match the size of this (" << size() << ")!"); - backend() += alpha * xx.backend(); + backend_ref += alpha * xx.backend(); } // ... axpy(...) 
bool has_equal_shape(const VectorImpType& other) const @@ -133,40 +139,46 @@ public: void add_to_entry(const size_t ii, const ScalarType& value) { + auto& backend_ref = backend(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); assert(ii < size()); - backend()(ii) += value; + backend_ref(ii) += value; } void set_entry(const size_t ii, const ScalarType& value) { + auto& backend_ref = backend(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); assert(ii < size()); - backend()(ii) = value; + backend_ref(ii) = value; } ScalarType get_entry(const size_t ii) const { assert(ii < size()); - return backend_->operator[](ii); + return backend()[ii]; } +protected: inline ScalarType& get_entry_ref(const size_t ii) { - return backend()[ii]; + return backend_->operator[](ii); } inline const ScalarType& get_entry_ref(const size_t ii) const { - return backend()[ii]; + return backend_->operator[](ii); } +public: inline ScalarType& operator[](const size_t ii) { - return get_entry_ref(ii); + return backend()[ii]; } inline const ScalarType& operator[](const size_t ii) const { - return get_entry_ref(ii); + return backend()[ii]; } /// \} @@ -176,6 +188,7 @@ public: virtual std::pair<size_t, RealType> amax() const override final { + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); auto result = std::make_pair(size_t(0), RealType(0)); size_t min_index = 0; size_t max_index = 0; @@ -194,10 +207,11 @@ public: template <class T> ScalarType dot(const EigenBaseVector<T, ScalarType>& other) const { + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); if (other.size() != size()) DUNE_THROW(Common::Exceptions::shapes_do_not_match, "The size of other (" << other.size() << ") does not match the size of this (" << size() << ")!"); - return backend().transpose() * other.backend(); } // ... dot(...) 
virtual ScalarType dot(const VectorImpType& other) const override final @@ -207,43 +221,31 @@ public: virtual RealType l1_norm() const override final { - return backend_->template lpNorm<1>(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); + return backend().template lpNorm<1>(); } virtual RealType l2_norm() const override final { - return backend_->template lpNorm<2>(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); + return backend().template lpNorm<2>(); } virtual RealType sup_norm() const override final { - return backend_->template lpNorm<::Eigen::Infinity>(); - } - - template <class T1, class T2> - void add(const EigenBaseVector<T1, ScalarType>& other, EigenBaseVector<T2, ScalarType>& result) const - { - if (other.size() != size()) - DUNE_THROW(Common::Exceptions::shapes_do_not_match, - "The size of other (" << other.size() << ") does not match the size of this (" << size() << ")!"); - if (result.size() != size()) - DUNE_THROW(Common::Exceptions::shapes_do_not_match, - "The size of result (" << result.size() << ") does not match the size of this (" << size() << ")!"); - result.backend() = *backend_ + *(other.backend_); - } // ... add(...) - - virtual void add(const VectorImpType& other, VectorImpType& result) const override final - { - return this->template add<Traits, Traits>(other, result); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); + return backend().template lpNorm<::Eigen::Infinity>(); } template <class T> void iadd(const EigenBaseVector<T, ScalarType>& other) { + auto& backend_ref = backend(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); if (other.size() != size()) DUNE_THROW(Common::Exceptions::shapes_do_not_match, "The size of other (" << other.size() << ") does not match the size of this (" << size() << ")!"); - backend() += *(other.backend_); + backend_ref += other.backend(); } // ... iadd(...) 
virtual void iadd(const VectorImpType& other) override final @@ -251,30 +253,15 @@ public: return this->template iadd<Traits>(other); } - template <class T1, class T2> - void sub(const EigenBaseVector<T1, ScalarType>& other, EigenBaseVector<T2, ScalarType>& result) const - { - if (other.size() != size()) - DUNE_THROW(Common::Exceptions::shapes_do_not_match, - "The size of other (" << other.size() << ") does not match the size of this (" << size() << ")!"); - if (result.size() != size()) - DUNE_THROW(Common::Exceptions::shapes_do_not_match, - "The size of result (" << result.size() << ") does not match the size of this (" << size() << ")!"); - result.backend() = *backend_ - *(other.backend_); - } // ... sub(...) - - virtual void sub(const VectorImpType& other, VectorImpType& result) const override final - { - return this->template sub<Traits, Traits>(other, result); - } - template <class T> void isub(const EigenBaseVector<T, ScalarType>& other) { + auto& backend_ref = backend(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); if (other.size() != size()) DUNE_THROW(Common::Exceptions::shapes_do_not_match, "The size of other (" << other.size() << ") does not match the size of this (" << size() << ")!"); - backend() -= *(other.backend_); + backend_ref -= *(other.backend_); } // ... isub(...) virtual void isub(const VectorImpType& other) override final @@ -287,16 +274,17 @@ public: //! disambiguation necessary since it exists in multiple bases using VectorInterfaceType::as_imp; -private: +protected: /** * \see ContainerInterface */ - void ensure_uniqueness() const + void ensure_uniqueness() { CHECK_AND_CALL_CRTP(VectorInterfaceType::as_imp().ensure_uniqueness()); VectorInterfaceType::as_imp().ensure_uniqueness(); } +private: #ifndef NDEBUG //! 
disambiguation necessary since it exists in multiple bases using VectorInterfaceType::crtp_mutex_; @@ -307,7 +295,8 @@ private: friend class EigenRowMajorSparseMatrix<ScalarType>; protected: - mutable std::shared_ptr<BackendType> backend_; + std::shared_ptr<BackendType> backend_; + mutable std::mutex mutex_; }; // class EigenBaseVector #else // HAVE_EIGEN diff --git a/dune/xt/la/container/eigen/dense.hh b/dune/xt/la/container/eigen/dense.hh index 6635df004c6e3aa6f7ad78b8e90d3cb582d55dbd..18bd6591be438ee6804efd47913c705cbc373255 100644 --- a/dune/xt/la/container/eigen/dense.hh +++ b/dune/xt/la/container/eigen/dense.hh @@ -18,6 +18,7 @@ #include <vector> #include <initializer_list> #include <complex> +#include <mutex> #include <boost/numeric/conversion/cast.hpp> @@ -30,6 +31,7 @@ #include <dune/common/typetraits.hh> #include <dune/common/densematrix.hh> #include <dune/common/ftraits.hh> +#include <dune/common/unused.hh> #include <dune/xt/common/exceptions.hh> #include <dune/xt/common/crtp.hh> @@ -175,6 +177,7 @@ public: */ ThisType& operator=(const BackendType& other) { + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(this->mutex_); backend_ = std::make_shared<BackendType>(other); return *this; } // ... operator=(...) @@ -196,12 +199,16 @@ public: private: using BaseType::backend_; - inline void ensure_uniqueness() const +protected: + inline void ensure_uniqueness() { - if (!backend_.unique()) + if (!backend_.unique()) { + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(this->mutex_); backend_ = std::make_shared<BackendType>(*(backend_)); + } } // ... ensure_uniqueness(...) 
+private: friend class EigenBaseVector<internal::EigenDenseVectorTraits<ScalarType>, ScalarType>; }; // class EigenDenseVector @@ -301,6 +308,7 @@ public: */ ThisType& operator=(const BackendType& other) { + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(this->mutex_); backend_ = std::make_shared<BackendType>(new ScalarType[other.size()], other.size()); backend_->operator=(other); return *this; @@ -313,15 +321,18 @@ public: private: using BaseType::backend_; +protected: inline void ensure_uniqueness() const { if (!backend_.unique()) { + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(this->mutex_); auto new_backend = std::make_shared<BackendType>(new ScalarType[backend_->size()], backend_->size()); new_backend->operator=(*(backend_)); backend_ = new_backend; } } // ... ensure_uniqueness(...) +private: friend class EigenBaseVector<internal::EigenMappedDenseVectorTraits<ScalarType>, ScalarType>; }; // class EigenMappedDenseVector @@ -361,7 +372,10 @@ public: backend_->setZero(); } - EigenDenseMatrix(const ThisType& other) = default; + EigenDenseMatrix(const ThisType& other) + : backend_(other.backend_) + { + } /** * \note If prune == true, this implementation is not optimal! 
@@ -419,6 +433,7 @@ public: */ ThisType& operator=(const BackendType& other) { + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); backend_ = std::make_shared<BackendType>(other); return *this; } @@ -434,7 +449,6 @@ public: const BackendType& backend() const { - ensure_uniqueness(); return *backend_; } @@ -453,16 +467,21 @@ public: ThisType copy() const { + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); return ThisType(*backend_); } void scal(const ScalarType& alpha) { - backend() *= alpha; + auto& backend_ref = backend(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); + backend_ref *= alpha; } void axpy(const ScalarType& alpha, const ThisType& xx) { + auto& backend_ref = backend(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); if (!has_equal_shape(xx)) DUNE_THROW(Common::Exceptions::shapes_do_not_match, "The shape of xx (" << xx.rows() << "x" << xx.cols() << ") does not match the shape of this (" @@ -470,8 +489,7 @@ public: << "x" << cols() << ")!"); - const auto& xx_ref = *(xx.backend_); - backend() += alpha * xx_ref; + backend_ref += alpha * xx.backend(); } // ... axpy(...) bool has_equal_shape(const ThisType& other) const @@ -496,83 +514,93 @@ public: template <class T1, class T2> inline void mv(const EigenBaseVector<T1, ScalarType>& xx, EigenBaseVector<T2, ScalarType>& yy) const { - yy.backend().transpose() = backend_->operator*(*xx.backend_); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); + yy.backend().transpose() = backend() * xx.backend(); } void add_to_entry(const size_t ii, const size_t jj, const ScalarType& value) { + auto& backend_ref = backend(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); assert(ii < rows()); assert(jj < cols()); - backend()(ii, jj) += value; + backend_ref(ii, jj) += value; } // ... add_to_entry(...) 
void set_entry(const size_t ii, const size_t jj, const ScalarType& value) { + auto& backend_ref = backend(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); assert(ii < rows()); assert(jj < cols()); - backend()(ii, jj) = value; + backend_ref(ii, jj) = value; } // ... set_entry(...) ScalarType get_entry(const size_t ii, const size_t jj) const { assert(ii < rows()); assert(jj < cols()); - return backend_->operator()(ii, jj); + return backend()(ii, jj); } // ... get_entry(...) void clear_row(const size_t ii) { + auto& backend_ref = backend(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); if (ii >= rows()) DUNE_THROW(Common::Exceptions::index_out_of_range, "Given ii (" << ii << ") is larger than the rows of this (" << rows() << ")!"); - ensure_uniqueness(); for (size_t jj = 0; jj < cols(); ++jj) - backend_->operator()(ii, jj) = ScalarType(0); + backend_ref(ii, jj) = ScalarType(0); } // ... clear_row(...) void clear_col(const size_t jj) { + auto& backend_ref = backend(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); if (jj >= cols()) DUNE_THROW(Common::Exceptions::index_out_of_range, "Given jj (" << jj << ") is larger than the cols of this (" << cols() << ")!"); - ensure_uniqueness(); for (size_t ii = 0; ii < rows(); ++ii) - backend_->operator()(ii, jj) = ScalarType(0); + backend_ref(ii, jj) = ScalarType(0); } // ... clear_col(...) 
void unit_row(const size_t ii) { + auto& backend_ref = backend(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); if (ii >= cols()) DUNE_THROW(Common::Exceptions::index_out_of_range, "Given ii (" << ii << ") is larger than the cols of this (" << cols() << ")!"); if (ii >= rows()) DUNE_THROW(Common::Exceptions::index_out_of_range, "Given ii (" << ii << ") is larger than the rows of this (" << rows() << ")!"); - ensure_uniqueness(); for (size_t jj = 0; jj < cols(); ++jj) - backend_->operator()(ii, jj) = ScalarType(0); - backend_->operator()(ii, ii) = ScalarType(1); + backend_ref(ii, jj) = ScalarType(0); + backend_ref(ii, ii) = ScalarType(1); } // ... unit_row(...) void unit_col(const size_t jj) { + auto& backend_ref = backend(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); if (jj >= cols()) DUNE_THROW(Common::Exceptions::index_out_of_range, "Given jj (" << jj << ") is larger than the cols of this (" << cols() << ")!"); if (jj >= rows()) DUNE_THROW(Common::Exceptions::index_out_of_range, "Given jj (" << jj << ") is larger than the rows of this (" << rows() << ")!"); - ensure_uniqueness(); for (size_t ii = 0; ii < rows(); ++ii) - backend_->operator()(ii, jj) = ScalarType(0); - backend_->operator()(jj, jj) = ScalarType(1); + backend_ref(ii, jj) = ScalarType(0); + backend_ref(jj, jj) = ScalarType(1); } // ... unit_col(...) bool valid() const { + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); for (size_t ii = 0; ii < rows(); ++ii) { for (size_t jj = 0; jj < cols(); ++jj) { - const auto& entry = backend_->operator()(ii, jj); + const auto& entry = backend()(ii, jj); if (Common::isnan(entry) || Common::isinf(entry)) return false; } @@ -584,14 +612,18 @@ public: * \} */ -private: - inline void ensure_uniqueness() const +protected: + inline void ensure_uniqueness() { - if (!backend_.unique()) + if (!backend_.unique()) { + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); backend_ = std::make_shared<BackendType>(*backend_); + } } // ... 
ensure_uniqueness(...) - mutable std::shared_ptr<BackendType> backend_; +private: + std::shared_ptr<BackendType> backend_; + mutable std::mutex mutex_; }; // class EigenDenseMatrix #else // HAVE_EIGEN diff --git a/dune/xt/la/container/eigen/sparse.hh b/dune/xt/la/container/eigen/sparse.hh index 02c821ad82add6bc39ccab75b7dbd67ab258bb55..8bf31d6df4acc81b63b72b5571613c95af7556a9 100644 --- a/dune/xt/la/container/eigen/sparse.hh +++ b/dune/xt/la/container/eigen/sparse.hh @@ -17,6 +17,7 @@ #include <type_traits> #include <vector> #include <complex> +#include <mutex> #include <boost/numeric/conversion/cast.hpp> @@ -28,6 +29,7 @@ #include <dune/common/typetraits.hh> #include <dune/common/ftraits.hh> +#include <dune/common/unused.hh> #include <dune/xt/common/math.hh> #include <dune/xt/common/exceptions.hh> @@ -187,6 +189,7 @@ public: */ ThisType& operator=(const BackendType& other) { + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); backend_ = std::make_shared<BackendType>(other); return *this; } @@ -202,7 +205,6 @@ public: const BackendType& backend() const { - ensure_uniqueness(); return *backend_; } @@ -212,16 +214,21 @@ public: ThisType copy() const { + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); return ThisType(*backend_); } void scal(const ScalarType& alpha) { - backend() *= alpha; - } // ... scal(...) + auto& backend_ref = backend(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); + backend_ref *= alpha; + } void axpy(const ScalarType& alpha, const ThisType& xx) { + auto& backend_ref = backend(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); if (!has_equal_shape(xx)) DUNE_THROW(Common::Exceptions::shapes_do_not_match, "The shape of xx (" << xx.rows() << "x" << xx.cols() << ") does not match the shape of this (" @@ -229,8 +236,7 @@ public: << "x" << cols() << ")!"); - const auto& xx_ref = *(xx.backend_); - backend() += alpha * xx_ref; + backend_ref += alpha * xx.backend(); } // ... axpy(...) 
bool has_equal_shape(const ThisType& other) const @@ -255,53 +261,62 @@ public: template <class T1, class T2> inline void mv(const EigenBaseVector<T1, ScalarType>& xx, EigenBaseVector<T2, ScalarType>& yy) const { - yy.backend().transpose() = backend_->operator*(*xx.backend_); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); + yy.backend().transpose() = backend() * xx.backend(); } void add_to_entry(const size_t ii, const size_t jj, const ScalarType& value) { + auto& backend_ref = backend(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); assert(these_are_valid_indices(ii, jj)); - backend().coeffRef(internal::boost_numeric_cast<EIGEN_size_t>(ii), - internal::boost_numeric_cast<EIGEN_size_t>(jj)) += value; + backend_ref.coeffRef(internal::boost_numeric_cast<EIGEN_size_t>(ii), + internal::boost_numeric_cast<EIGEN_size_t>(jj)) += value; } void set_entry(const size_t ii, const size_t jj, const ScalarType& value) { + auto& backend_ref = backend(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); assert(these_are_valid_indices(ii, jj)); - backend().coeffRef(internal::boost_numeric_cast<EIGEN_size_t>(ii), internal::boost_numeric_cast<EIGEN_size_t>(jj)) = - value; + backend_ref.coeffRef(internal::boost_numeric_cast<EIGEN_size_t>(ii), + internal::boost_numeric_cast<EIGEN_size_t>(jj)) = value; } ScalarType get_entry(const size_t ii, const size_t jj) const { + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); assert(ii < rows()); assert(jj < cols()); - return backend_->coeff(internal::boost_numeric_cast<EIGEN_size_t>(ii), + return backend().coeff(internal::boost_numeric_cast<EIGEN_size_t>(ii), internal::boost_numeric_cast<EIGEN_size_t>(jj)); } void clear_row(const size_t ii) { + auto& backend_ref = backend(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); if (ii >= rows()) DUNE_THROW(Common::Exceptions::index_out_of_range, "Given ii (" << ii << ") is larger than the rows of this (" << rows() << ")!"); - 
backend().row(internal::boost_numeric_cast<EIGEN_size_t>(ii)) *= ScalarType(0); + backend_ref.row(internal::boost_numeric_cast<EIGEN_size_t>(ii)) *= ScalarType(0); } void clear_col(const size_t jj) { + auto& backend_ref = backend(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); if (jj >= cols()) DUNE_THROW(Common::Exceptions::index_out_of_range, "Given jj (" << jj << ") is larger than the cols of this (" << cols() << ")!"); - ensure_uniqueness(); - for (size_t row = 0; internal::boost_numeric_cast<EIGEN_size_t>(row) < backend_->outerSize(); ++row) { - for (typename BackendType::InnerIterator row_it(*backend_, internal::boost_numeric_cast<EIGEN_size_t>(row)); + for (size_t row = 0; internal::boost_numeric_cast<EIGEN_size_t>(row) < backend_ref.outerSize(); ++row) { + for (typename BackendType::InnerIterator row_it(backend_ref, internal::boost_numeric_cast<EIGEN_size_t>(row)); row_it; ++row_it) { const size_t col = row_it.col(); if (col == jj) { - backend_->coeffRef(internal::boost_numeric_cast<EIGEN_size_t>(row), - internal::boost_numeric_cast<EIGEN_size_t>(jj)) = ScalarType(0); + backend_ref.coeffRef(internal::boost_numeric_cast<EIGEN_size_t>(row), + internal::boost_numeric_cast<EIGEN_size_t>(jj)) = ScalarType(0); break; } else if (col > jj) break; @@ -311,6 +326,8 @@ public: void unit_row(const size_t ii) { + auto& backend_ref = backend(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); if (ii >= cols()) DUNE_THROW(Common::Exceptions::index_out_of_range, "Given ii (" << ii << ") is larger than the cols of this (" << cols() << ")!"); @@ -320,31 +337,32 @@ public: if (!these_are_valid_indices(ii, ii)) DUNE_THROW(Common::Exceptions::index_out_of_range, "Diagonal entry (" << ii << ", " << ii << ") is not contained in the sparsity pattern!"); - backend().row(internal::boost_numeric_cast<EIGEN_size_t>(ii)) *= ScalarType(0); - set_entry(ii, ii, ScalarType(1)); + backend_ref.row(internal::boost_numeric_cast<EIGEN_size_t>(ii)) *= ScalarType(0); + 
backend_ref.coeffRef(internal::boost_numeric_cast<EIGEN_size_t>(ii), + internal::boost_numeric_cast<EIGEN_size_t>(ii)) = ScalarType(1); } // ... unit_row(...) void unit_col(const size_t jj) { + auto& backend_ref = backend(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); if (jj >= cols()) DUNE_THROW(Common::Exceptions::index_out_of_range, "Given jj (" << jj << ") is larger than the cols of this (" << cols() << ")!"); if (jj >= rows()) DUNE_THROW(Common::Exceptions::index_out_of_range, "Given jj (" << jj << ") is larger than the rows of this (" << rows() << ")!"); - ensure_uniqueness(); - for (size_t row = 0; internal::boost_numeric_cast<EIGEN_size_t>(row) < backend_->outerSize(); ++row) { - for (typename BackendType::InnerIterator row_it(*backend_, internal::boost_numeric_cast<EIGEN_size_t>(row)); + for (size_t row = 0; internal::boost_numeric_cast<EIGEN_size_t>(row) < backend_ref.outerSize(); ++row) { + for (typename BackendType::InnerIterator row_it(backend_ref, internal::boost_numeric_cast<EIGEN_size_t>(row)); row_it; ++row_it) { const size_t col = row_it.col(); if (col == jj) { if (col == row) - backend_->coeffRef(internal::boost_numeric_cast<EIGEN_size_t>(row), - internal::boost_numeric_cast<EIGEN_size_t>(col)) = ScalarType(1); + backend_ref.coeffRef(internal::boost_numeric_cast<EIGEN_size_t>(row), + internal::boost_numeric_cast<EIGEN_size_t>(col)) = ScalarType(1); else - backend_->coeffRef(internal::boost_numeric_cast<EIGEN_size_t>(row), - internal::boost_numeric_cast<EIGEN_size_t>(jj)) = ScalarType(0); + backend_ref.coeffRef(internal::boost_numeric_cast<EIGEN_size_t>(row), + internal::boost_numeric_cast<EIGEN_size_t>(jj)) = ScalarType(0); break; } else if (col > jj) break; @@ -354,10 +372,11 @@ public: bool valid() const { + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); // iterate over non-zero entries typedef typename BackendType::InnerIterator InnerIterator; - for (EIGEN_size_t ii = 0; ii < backend_->outerSize(); ++ii) { - for (InnerIterator it(*backend_, ii); it; ++it) { + for (EIGEN_size_t ii = 0; ii < backend().outerSize(); ++ii) 
{ + for (InnerIterator it(backend(), ii); it; ++it) { if (Common::isnan(it.value()) || Common::isinf(it.value())) return false; } @@ -374,20 +393,21 @@ public: pattern(const bool prune = false, const ScalarType eps = Common::FloatCmp::DefaultEpsilon<ScalarType>::value()) const override { + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); SparsityPatternDefault ret(rows()); const auto zero = typename Common::FloatCmp::DefaultEpsilon<ScalarType>::Type(0); if (prune) { - for (EIGEN_size_t row = 0; row < backend_->outerSize(); ++row) { - for (typename BackendType::InnerIterator row_it(*backend_, row); row_it; ++row_it) { + for (EIGEN_size_t row = 0; row < backend().outerSize(); ++row) { + for (typename BackendType::InnerIterator row_it(backend(), row); row_it; ++row_it) { const EIGEN_size_t col = row_it.col(); - const auto val = backend_->coeff(row, col); + const auto val = backend().coeff(row, col); if (Common::FloatCmp::ne(val, zero, eps)) ret.insert(boost::numeric_cast<size_t>(row), boost::numeric_cast<size_t>(col)); } } } else { - for (EIGEN_size_t row = 0; row < backend_->outerSize(); ++row) { - for (typename BackendType::InnerIterator row_it(*backend_, row); row_it; ++row_it) + for (EIGEN_size_t row = 0; row < backend().outerSize(); ++row) { + for (typename BackendType::InnerIterator row_it(backend(), row); row_it; ++row_it) ret.insert(boost::numeric_cast<size_t>(row), boost::numeric_cast<size_t>(row_it.col())); } } @@ -398,6 +418,7 @@ public: virtual ThisType pruned(const ScalarType eps = Common::FloatCmp::DefaultEpsilon<ScalarType>::value()) const override final { + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); return ThisType(*backend_, true, eps); } @@ -410,8 +431,8 @@ private: return false; if (jj >= cols()) return false; - for (size_t row = ii; internal::boost_numeric_cast<EIGEN_size_t>(row) < backend_->outerSize(); ++row) { - for (typename BackendType::InnerIterator row_it(*backend_, internal::boost_numeric_cast<EIGEN_size_t>(row)); + for 
(size_t row = ii; internal::boost_numeric_cast<EIGEN_size_t>(row) < backend().outerSize(); ++row) { + for (typename BackendType::InnerIterator row_it(backend(), internal::boost_numeric_cast<EIGEN_size_t>(row)); row_it; ++row_it) { const size_t col = row_it.col(); @@ -424,13 +445,18 @@ private: return false; } // ... these_are_valid_indices(...) - inline void ensure_uniqueness() const +protected: + inline void ensure_uniqueness() { - if (!backend_.unique()) + if (!backend_.unique()) { + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); backend_ = std::make_shared<BackendType>(*backend_); + } } // ... ensure_uniqueness(...) - mutable std::shared_ptr<BackendType> backend_; +private: + std::shared_ptr<BackendType> backend_; + mutable std::mutex mutex_; }; // class EigenRowMajorSparseMatrix #else // HAVE_EIGEN diff --git a/dune/xt/la/container/istl.hh b/dune/xt/la/container/istl.hh index 7a6f2c58a7ebff84fea254c6e551862e41cca244..c1b0d6b6129dab7b02c02c15a52f401756af9bd3 100644 --- a/dune/xt/la/container/istl.hh +++ b/dune/xt/la/container/istl.hh @@ -17,11 +17,13 @@ #include <vector> #include <initializer_list> #include <complex> +#include <mutex> #include <dune/common/fmatrix.hh> #include <dune/common/fvector.hh> #include <dune/common/typetraits.hh> #include <dune/common/ftraits.hh> +#include <dune/common/unused.hh> #if HAVE_DUNE_ISTL #include <dune/istl/bvector.hh> @@ -120,7 +122,10 @@ public: } } // IstlDenseVector(...) 
- IstlDenseVector(const ThisType& other) = default; + IstlDenseVector(const ThisType& other) + : backend_(other.backend_) + { + } explicit IstlDenseVector(const BackendType& other, const bool /*prune*/ = false, @@ -153,6 +158,7 @@ public: */ ThisType& operator=(const BackendType& other) { + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); backend_ = std::make_shared<BackendType>(other); return *this; } @@ -163,6 +169,7 @@ public: ThisType& operator=(const ScalarType& value) { ensure_uniqueness(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); for (auto& element : *this) element = value; return *this; @@ -176,7 +183,6 @@ public: const BackendType& backend() const { - ensure_uniqueness(); return *backend_; } @@ -195,20 +201,25 @@ public: ThisType copy() const { + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); return ThisType(*backend_); } void scal(const ScalarType& alpha) { - backend() *= alpha; + auto& backend_ref = backend(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); + backend_ref *= alpha; } void axpy(const ScalarType& alpha, const ThisType& xx) { + auto& backend_ref = backend(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); if (xx.size() != size()) DUNE_THROW(Common::Exceptions::shapes_do_not_match, "The size of x (" << xx.size() << ") does not match the size of this (" << size() << ")!"); - backend().axpy(alpha, *(xx.backend_)); + backend_ref.axpy(alpha, xx.backend()); } bool has_equal_shape(const ThisType& other) const @@ -230,14 +241,18 @@ public: void add_to_entry(const size_t ii, const ScalarType& value) { + auto& backend_ref = backend(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); assert(ii < size()); - backend()[ii][0] += value; + backend_ref[ii][0] += value; } void set_entry(const size_t ii, const ScalarType& value) { + auto& backend_ref = backend(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); assert(ii < size()); - backend()[ii][0] = value; + backend_ref[ii][0] = value; } 
ScalarType get_entry(const size_t ii) const @@ -246,9 +261,10 @@ public: return backend_->operator[](ii)[0]; } +protected: inline ScalarType& get_entry_ref(const size_t ii) { - return backend()[ii][0]; + return backend_->operator[](ii)[0]; } inline const ScalarType& get_entry_ref(const size_t ii) const @@ -256,14 +272,15 @@ public: return backend_->operator[](ii)[0]; } +public: inline ScalarType& operator[](const size_t ii) { - return get_entry_ref(ii); + return backend()[ii][0]; } inline const ScalarType& operator[](const size_t ii) const { - return get_entry_ref(ii); + return backend()[ii][0]; } /// \} @@ -272,103 +289,71 @@ public: virtual ScalarType dot(const ThisType& other) const override final { + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); if (other.size() != size()) DUNE_THROW(Common::Exceptions::shapes_do_not_match, "The size of other (" << other.size() << ") does not match the size of this (" << size() << ")!"); - return backend_->dot(*(other.backend_)); + return backend().dot(other.backend()); } // ... dot(...) 
virtual RealType l1_norm() const override final { - return backend_->one_norm(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); + return backend().one_norm(); } virtual RealType l2_norm() const override final { - return backend_->two_norm(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); + return backend().two_norm(); } virtual RealType sup_norm() const override final { - return backend_->infinity_norm(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); + return backend().infinity_norm(); } - virtual void add(const ThisType& other, ThisType& result) const override final - { - if (other.size() != size()) - DUNE_THROW(Common::Exceptions::shapes_do_not_match, - "The size of other (" << other.size() << ") does not match the size of this (" << size() << ")!"); - if (result.size() != size()) - DUNE_THROW(Common::Exceptions::shapes_do_not_match, - "The size of result (" << result.size() << ") does not match the size of this (" << size() << ")!"); - result.backend() = *(backend_); - result.backend() += *(other.backend_); - } // ... add(...) - - virtual ThisType add(const ThisType& other) const override final - { - if (other.size() != size()) - DUNE_THROW(Common::Exceptions::shapes_do_not_match, - "The size of other (" << other.size() << ") does not match the size of this (" << size() << ")!"); - ThisType result = copy(); - result.backend_->operator+=(*(other.backend_)); - return result; - } // ... add(...) - virtual void iadd(const ThisType& other) override final { + auto& backend_ref = backend(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); if (other.size() != size()) DUNE_THROW(Common::Exceptions::shapes_do_not_match, "The size of other (" << other.size() << ") does not match the size of this (" << size() << ")!"); - backend() += *(other.backend_); + backend_ref += other.backend(); } // ... iadd(...) 
- virtual void sub(const ThisType& other, ThisType& result) const override final - { - if (other.size() != size()) - DUNE_THROW(Common::Exceptions::shapes_do_not_match, - "The size of other (" << other.size() << ") does not match the size of this (" << size() << ")!"); - if (result.size() != size()) - DUNE_THROW(Common::Exceptions::shapes_do_not_match, - "The size of result (" << result.size() << ") does not match the size of this (" << size() << ")!"); - result.backend() = *(backend_); - result.backend() -= *(other.backend_); - } // ... sub(...) - - virtual ThisType sub(const ThisType& other) const override final - { - if (other.size() != size()) - DUNE_THROW(Common::Exceptions::shapes_do_not_match, - "The size of other (" << other.size() << ") does not match the size of this (" << size() << ")!"); - ThisType result = copy(); - result.backend_->operator-=(*(other.backend_)); - return result; - } // ... sub(...) - virtual void isub(const ThisType& other) override final { + auto& backend_ref = backend(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); if (other.size() != size()) DUNE_THROW(Common::Exceptions::shapes_do_not_match, "The size of other (" << other.size() << ") does not match the size of this (" << size() << ")!"); - backend() -= (*(other.backend_)); + backend_ref -= other.backend(); } // ... isub(...) /// \} -private: +protected: /** * \see ContainerInterface */ - inline void ensure_uniqueness() const + inline void ensure_uniqueness() { - if (!backend_.unique()) + if (!backend_.unique()) { + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); backend_ = std::make_shared<BackendType>(*backend_); + } } // ... ensure_uniqueness(...) 
+private: friend class VectorInterface<internal::IstlDenseVectorTraits<ScalarType>, ScalarType>; friend class IstlRowMajorSparseMatrix<ScalarType>; - mutable std::shared_ptr<BackendType> backend_; + std::shared_ptr<BackendType> backend_; + mutable std::mutex mutex_; }; // class IstlDenseVector /** @@ -409,7 +394,10 @@ public: { } - IstlRowMajorSparseMatrix(const ThisType& other) = default; + IstlRowMajorSparseMatrix(const ThisType& other) + : backend_(other.backend_) + { + } explicit IstlRowMajorSparseMatrix(const BackendType& mat, const bool prune = false, @@ -456,6 +444,7 @@ public: */ ThisType& operator=(const BackendType& other) { + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); backend_ = std::make_shared<BackendType>(other); return *this; } // ... operator=(...) @@ -471,7 +460,6 @@ public: const BackendType& backend() const { - ensure_uniqueness(); return *backend_; } @@ -481,16 +469,21 @@ public: ThisType copy() const { + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); return ThisType(*backend_); } void scal(const ScalarType& alpha) { - backend() *= alpha; + auto& backend_ref = backend(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); + backend_ref *= alpha; } void axpy(const ScalarType& alpha, const ThisType& xx) { + auto& backend_ref = backend(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); if (!has_equal_shape(xx)) DUNE_THROW(Common::Exceptions::shapes_do_not_match, "The shape of xx (" << xx.rows() << "x" << xx.cols() << ") does not match the shape of this (" @@ -498,7 +491,7 @@ public: << "x" << cols() << ")!"); - backend().axpy(alpha, *(xx.backend_)); + backend_ref.axpy(alpha, xx.backend()); } // ... axpy(...) 
bool has_equal_shape(const ThisType& other) const @@ -523,19 +516,25 @@ public: inline void mv(const IstlDenseVector<ScalarType>& xx, IstlDenseVector<ScalarType>& yy) const { DUNE_XT_COMMON_TIMING_SCOPE(static_id() + ".mv"); - backend_->mv(*(xx.backend_), yy.backend()); + auto& backend_ref = backend(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); + backend_ref.mv(xx.backend(), yy.backend()); } void add_to_entry(const size_t ii, const size_t jj, const ScalarType& value) { + auto& backend_ref = backend(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); assert(these_are_valid_indices(ii, jj)); - backend()[ii][jj][0][0] += value; + backend_ref[ii][jj][0][0] += value; } void set_entry(const size_t ii, const size_t jj, const ScalarType& value) { + auto& backend_ref = backend(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); assert(these_are_valid_indices(ii, jj)); - backend()[ii][jj][0][0] = value; + backend_ref[ii][jj][0][0] = value; } ScalarType get_entry(const size_t ii, const size_t jj) const @@ -550,18 +549,21 @@ public: void clear_row(const size_t ii) { + auto& backend_ref = backend(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); if (ii >= rows()) DUNE_THROW(Common::Exceptions::index_out_of_range, "Given ii (" << ii << ") is larger than the rows of this (" << rows() << ")!"); - backend()[ii] *= ScalarType(0); + backend_ref[ii] *= ScalarType(0); } // ... clear_row(...) 
void clear_col(const size_t jj) { + ensure_uniqueness(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); if (jj >= cols()) DUNE_THROW(Common::Exceptions::index_out_of_range, "Given jj (" << jj << ") is larger than the cols of this (" << cols() << ")!"); - ensure_uniqueness(); for (size_t ii = 0; ii < rows(); ++ii) { auto& row = backend_->operator[](ii); const auto search_result = row.find(jj); @@ -572,6 +574,8 @@ public: void unit_row(const size_t ii) { + ensure_uniqueness(); + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); if (ii >= cols()) DUNE_THROW(Common::Exceptions::index_out_of_range, "Given ii (" << ii << ") is larger than the cols of this (" << cols() << ")!"); @@ -581,13 +585,13 @@ public: if (!backend_->exists(ii, ii)) DUNE_THROW(Common::Exceptions::index_out_of_range, "Diagonal entry (" << ii << ", " << ii << ") is not contained in the sparsity pattern!"); - ensure_uniqueness(); backend_->operator[](ii) *= ScalarType(0); backend_->operator[](ii)[ii] = ScalarType(1); } // ... unit_row(...) void unit_col(const size_t jj) { + ensure_uniqueness(); if (jj >= cols()) DUNE_THROW(Common::Exceptions::index_out_of_range, "Given jj (" << jj << ") is larger than the cols of this (" << cols() << ")!"); @@ -597,18 +601,20 @@ public: if (!backend_->exists(jj, jj)) DUNE_THROW(Common::Exceptions::index_out_of_range, "Diagonal entry (" << jj << ", " << jj << ") is not contained in the sparsity pattern!"); - ensure_uniqueness(); + mutex_.lock(); for (size_t ii = 0; (ii < rows()) && (ii != jj); ++ii) { auto& row = backend_->operator[](ii); const auto search_result = row.find(jj); if (search_result != row.end()) row.operator[](jj)[0][0] = ScalarType(0); } + mutex_.unlock(); set_entry(jj, jj, ScalarType(1)); } // ... unit_col(...) 
bool valid() const { + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); for (size_t ii = 0; ii < rows(); ++ii) { const auto& row_vec = backend_->operator[](ii); for (size_t jj = 0; jj < cols(); ++jj) @@ -634,6 +640,7 @@ public: const typename Common::FloatCmp::DefaultEpsilon<ScalarType>::Type eps = Common::FloatCmp::DefaultEpsilon<ScalarType>::value()) const override final { + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); SparsityPatternDefault ret(rows()); if (prune) { return pruned_pattern_from_backend(*backend_, eps); @@ -654,6 +661,7 @@ public: virtual ThisType pruned(const typename Common::FloatCmp::DefaultEpsilon<ScalarType>::Type eps = Common::FloatCmp::DefaultEpsilon<ScalarType>::value()) const override final { + std::lock_guard<std::mutex> DUNE_UNUSED(lock)(mutex_); return ThisType(*backend_, true, eps); } @@ -703,16 +711,19 @@ private: return backend_->exists(ii, jj); } // ... these_are_valid_indices(...) +protected: /** * \see ContainerInterface */ - inline void ensure_uniqueness() const + inline void ensure_uniqueness() { if (!backend_.unique()) backend_ = std::make_shared<BackendType>(*backend_); } // ... ensure_uniqueness(...) - mutable std::shared_ptr<BackendType> backend_; +private: + std::shared_ptr<BackendType> backend_; + mutable std::mutex mutex_; }; // class IstlRowMajorSparseMatrix template <class S> diff --git a/dune/xt/la/container/vector-interface.hh b/dune/xt/la/container/vector-interface.hh index bb553babe7205552699a45c894aa0e88d5ae9cf1..98f85ecf47d5f18ce756d36fd6672aa9efa9f4ea 100644 --- a/dune/xt/la/container/vector-interface.hh +++ b/dune/xt/la/container/vector-interface.hh @@ -84,7 +84,6 @@ public: /** * \brief Get the iith entry. - * \todo Default implement using get_entry_ref! 
*/ inline ScalarType get_entry(const size_t ii) const { @@ -92,6 +91,11 @@ public: return this->as_imp().get_entry(ii); } +protected: + /** + * The purpose of get_entry_ref is to allow direct access to the underlying data without any checks (regarding cow + * or thread safety). This allows default implementations in the interface with locking prior to for-loops. + */ inline ScalarType& get_entry_ref(const size_t ii) { CHECK_CRTP(this->as_imp().get_entry_ref(ii)); @@ -104,6 +108,7 @@ public: return this->as_imp().get_entry_ref(ii); } +public: /// \} /// \name Provided by the interface for convenience! /// \note Those marked as virtual may be implemented more efficiently in a derived class! diff --git a/dune/xt/la/test/CMakeLists.txt b/dune/xt/la/test/CMakeLists.txt index 09fce8073c3666e7724c7762b23bbed215ffcfe5..cc5de15468a67170f6eb6830d172cf34531e2c0d 100644 --- a/dune/xt/la/test/CMakeLists.txt +++ b/dune/xt/la/test/CMakeLists.txt @@ -7,6 +7,8 @@ enable_testing() +set_source_files_properties( ${DUNE_HEADERS} PROPERTIES HEADER_FILE_ONLY 1 ) + BEGIN_TESTCASES(dunextla) END_TESTCASES()