diff --git a/CMakeLists.txt b/CMakeLists.txt
index e6aa23e8..3036316d 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -84,6 +84,10 @@ nwx_add_pybind11_module(
 if("${BUILD_TESTING}")
     set(cxx_test_dir "${CMAKE_CURRENT_LIST_DIR}/tests/cxx")
     set(cxx_unit_test_dir "${cxx_test_dir}/unit_tests/${PROJECT_NAME}")
+    set(
+        cxx_acceptance_test_dir
+        "${cxx_test_dir}/acceptance_tests/${PROJECT_NAME}"
+    )
     set(python_test_dir "${CMAKE_CURRENT_LIST_DIR}/tests/python")
     set(python_unit_test_dir "${python_test_dir}/unit_tests")
@@ -96,6 +100,14 @@ if("${BUILD_TESTING}")
         DEPENDS Catch2 ${PROJECT_NAME}
     )
 
+    cmaize_add_tests(
+        acceptance_test_${PROJECT_NAME}
+        SOURCE_DIR "${cxx_acceptance_test_dir}"
+        INCLUDE_DIRS "${project_src_dir}"
+        DEPENDS Catch2 ${PROJECT_NAME}
+    )
+
+
     nwx_add_pybind11_module(
         py_test_${PROJECT_NAME}
         INSTALL OFF
diff --git a/include/tensorwrapper/allocator/allocator_base.hpp b/include/tensorwrapper/allocator/allocator_base.hpp
index ceeddd40..1ebc1308 100644
--- a/include/tensorwrapper/allocator/allocator_base.hpp
+++ b/include/tensorwrapper/allocator/allocator_base.hpp
@@ -16,9 +16,10 @@
 #pragma once
 #include
-#include
+#include
 #include
 #include
+#include
 
 namespace tensorwrapper::allocator {
@@ -49,19 +50,28 @@ class AllocatorBase : public detail_::PolymorphicBase {
     using layout_type = layout::Physical;
 
     /// Type of a pointer to an object of type layout_type
-    using layout_pointer = typename layout_type::layout_pointer;
+    using layout_pointer = std::unique_ptr<layout_type>;
+
+    /// Type of a read-only reference to the layout
+    using const_layout_reference = const layout_type&;
 
     /// Type all buffers derive from
     using buffer_base_type = buffer::BufferBase;
 
+    /// Type of the class defining types for the buffer_base_type class
+    using buffer_base_traits = types::ClassTraits<buffer_base_type>;
+
     /// Type of a mutable reference to an object of type buffer_base_type
-    using buffer_base_reference = buffer_base_type&;
+    using buffer_base_reference =
+      typename buffer_base_traits::buffer_base_reference;
 
     /// Type of a read-only reference to an object of type buffer_base_type
-    using const_buffer_base_reference = const buffer_base_type&;
+    using const_buffer_base_reference =
+      typename buffer_base_traits::const_buffer_base_reference;
 
     /// Type of a pointer to an object of type buffer_base_type
-    using buffer_base_pointer = typename buffer_base_type::buffer_base_pointer;
+    using buffer_base_pointer =
+      typename buffer_base_traits::buffer_base_pointer;
 
     // -------------------------------------------------------------------------
     // -- Ctors and assignment
@@ -85,6 +95,10 @@ class AllocatorBase : public detail_::PolymorphicBase {
         return allocate_(std::move(playout));
     }
 
+    buffer_base_pointer allocate(const_layout_reference layout) {
+        return allocate(layout.clone_as<layout_type>());
+    }
+
     /** @brief The runtime *this uses for allocating.
      *
      * Allocators are tied to runtimes. This method can be used to retrieve
@@ -151,6 +165,7 @@
      * @throw None No throw guarantee.
      */
    explicit AllocatorBase(runtime_view_type rv) : m_rv_(std::move(rv)) {}
+
    /** @brief Creates *this so that it uses the same runtime as @p other.
     *
     * @param[in] other The allocator to make a copy of.
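Reviewer sketch (not part of the diff): the new non-virtual allocate() overload
accepts the layout by const reference, clones it, and forwards to the
unique_ptr overload, so callers no longer have to heap-allocate the layout
themselves. A minimal usage sketch, assuming a concrete allocator object
`alloc` and a layout::Physical object `phys` (both hypothetical names):

    auto pbuf_a = alloc.allocate(phys); // copies phys internally
    auto pbuf_b = alloc.allocate(
      phys.clone_as<tensorwrapper::layout::Physical>()); // hands over a copy

Both calls funnel into the same virtual allocate_() hook.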
diff --git a/include/tensorwrapper/allocator/allocator_fwd.hpp b/include/tensorwrapper/allocator/allocator_fwd.hpp
new file mode 100644
index 00000000..38c000aa
--- /dev/null
+++ b/include/tensorwrapper/allocator/allocator_fwd.hpp
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2025 NWChemEx-Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+namespace tensorwrapper::allocator {
+
+class AllocatorBase;
+
+template<typename FloatType>
+class Eigen;
+
+class Local;
+
+class Replicated;
+
+template<typename FloatType>
+class Contiguous;
+
+} // namespace tensorwrapper::allocator
\ No newline at end of file
diff --git a/include/tensorwrapper/allocator/contiguous.hpp b/include/tensorwrapper/allocator/contiguous.hpp
new file mode 100644
index 00000000..058f7906
--- /dev/null
+++ b/include/tensorwrapper/allocator/contiguous.hpp
@@ -0,0 +1,123 @@
+/*
+ * Copyright 2025 NWChemEx-Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+#include
+#include
+
+namespace tensorwrapper::allocator {
+
+/** @brief Allocator that can create Contiguous buffers.
+ *
+ * @tparam FloatType Type of the elements in the contiguous buffer.
+ */
+template<typename FloatType>
+class Contiguous : public Replicated {
+private:
+    /// Type of *this
+    using my_type = Contiguous<FloatType>;
+
+    /// Type *this derives from
+    using base_type = Replicated;
+
+public:
+    /// Pull in base types
+    ///@{
+    using base_type::buffer_base_pointer;
+    using base_type::const_layout_reference;
+    using base_type::layout_pointer;
+    ///@}
+
+    /// Type of each element in the tensor
+    using element_type = FloatType;
+
+    /// Type of the buffer associated with *this
+    using contiguous_buffer_type = buffer::Contiguous<FloatType>;
+    using contiguous_pointer     = std::unique_ptr<contiguous_buffer_type>;
+
+    /// Type of initializer lists
+    using rank0_il = typename types::ILTraits<FloatType, 0>::type;
+    using rank1_il = typename types::ILTraits<FloatType, 1>::type;
+    using rank2_il = typename types::ILTraits<FloatType, 2>::type;
+    using rank3_il = typename types::ILTraits<FloatType, 3>::type;
+    using rank4_il = typename types::ILTraits<FloatType, 4>::type;
+
+    /// Pull in base class's ctors
+    using base_type::base_type;
+
+    /** @brief Allocates a contiguous buffer given @p layout.
+     *
+     * @note These methods shadow the function of the same name in the base
+     *       class. The intent is to avoid needing to rebind a freshly
+     *       allocated buffer when the user already knows it is a Contiguous
+     *       buffer.
+     *
+     * @param[in] layout The layout of the tensor to allocate. May be passed as
+     *                   a unique_ptr or by reference. If passed by reference
+     *                   will be copied.
+     *
+     * @return A pointer to the newly allocated buffer::Contiguous object.
+     */
+    ///@{
+    contiguous_pointer allocate(const_layout_reference layout) {
+        return allocate(layout.clone_as<layout_type>());
+    }
+    contiguous_pointer allocate(layout_pointer layout) {
+        auto p = allocate_(std::move(layout));
+        return detail_::static_pointer_cast<contiguous_buffer_type>(p);
+    }
+    ///@}
+
+    /// Constructs a contiguous buffer from an initializer list
+    ///@{
+    contiguous_pointer construct(rank0_il il) { return construct_(il); }
+    contiguous_pointer construct(rank1_il il) { return construct_(il); }
+    contiguous_pointer construct(rank2_il il) { return construct_(il); }
+    contiguous_pointer construct(rank3_il il) { return construct_(il); }
+    contiguous_pointer construct(rank4_il il) { return construct_(il); }
+    ///@}
+
+    /** @brief Constructs a contiguous buffer and sets all elements to
+     *         @p value.
+     *
+     * @param[in] layout The layout of the buffer to allocate. May be passed
+     *                   either by unique_ptr or reference. If passed by
+     *                   reference will be copied.
+     *
+     * @return A pointer to the newly constructed buffer.
+     */
+    ///@{
+    contiguous_pointer construct(const_layout_reference layout,
+                                 element_type value) {
+        return construct(layout.clone_as<layout_type>(), std::move(value));
+    }
+    contiguous_pointer construct(layout_pointer layout, element_type value) {
+        return construct_(std::move(layout), std::move(value));
+    }
+    ///@}
+
+protected:
+    virtual contiguous_pointer construct_(rank0_il il) = 0;
+    virtual contiguous_pointer construct_(rank1_il il) = 0;
+    virtual contiguous_pointer construct_(rank2_il il) = 0;
+    virtual contiguous_pointer construct_(rank3_il il) = 0;
+    virtual contiguous_pointer construct_(rank4_il il) = 0;
+
+    /// To be overridden by the derived class to implement construct
+    virtual contiguous_pointer construct_(layout_pointer layout,
+                                          element_type value) = 0;
+};
+
+} // namespace tensorwrapper::allocator
\ No newline at end of file
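Reviewer sketch (not part of the diff): the Contiguous allocator's construct()
overloads cover nested initializer lists (ranks 0 through 4) and fill-by-value
construction. A minimal usage sketch, assuming a concrete
allocator::Contiguous<double>& named `alloc` and a compatible layout `phys`
(hypothetical names):

    // rank-2 initializer list; the 2x2 shape is deduced from the nesting
    auto pbuf = alloc.construct({{1.0, 2.0}, {3.0, 4.0}});

    // allocate per an existing layout, then set every element to zero
    auto pzeros = alloc.construct(phys, 0.0);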
diff --git a/include/tensorwrapper/allocator/eigen.hpp b/include/tensorwrapper/allocator/eigen.hpp
index 63bf053e..851b5f0a 100644
--- a/include/tensorwrapper/allocator/eigen.hpp
+++ b/include/tensorwrapper/allocator/eigen.hpp
@@ -15,12 +15,9 @@
  */
 
 #pragma once
-#include
+#include
 #include
-
-#ifdef ENABLE_SIGMA
-#include
-#endif
+#include
 
 namespace tensorwrapper::allocator {
@@ -33,27 +30,34 @@ namespace tensorwrapper::allocator {
 * This allocator is capable of creating buffers with Eigen tensors in them.
 *
 */
-template<typename FloatType, unsigned short Rank>
-class Eigen : public Replicated {
+template<typename FloatType>
+class Eigen : public Contiguous<FloatType> {
 private:
     /// The type of *this
-    using my_type = Eigen<FloatType, Rank>;
+    using my_type = Eigen<FloatType>;
 
     /// The class *this inherits from
-    using my_base_type = Replicated;
+    using my_base_type = Contiguous<FloatType>;
 
 public:
     // Pull in base class's types
-    using my_base_type::base_pointer;
-    using my_base_type::buffer_base_pointer;
-    using my_base_type::buffer_base_reference;
-    using my_base_type::const_base_reference;
-    using my_base_type::const_buffer_base_reference;
-    using my_base_type::layout_pointer;
-    using my_base_type::runtime_view_type;
+    using typename my_base_type::base_pointer;
+    using typename my_base_type::buffer_base_pointer;
+    using typename my_base_type::buffer_base_reference;
+    using typename my_base_type::const_base_reference;
+    using typename my_base_type::const_buffer_base_reference;
+    using typename my_base_type::contiguous_pointer;
+    using typename my_base_type::element_type;
+    using typename my_base_type::layout_pointer;
+    using typename my_base_type::rank0_il;
+    using typename my_base_type::rank1_il;
+    using typename my_base_type::rank2_il;
+    using typename my_base_type::rank3_il;
+    using typename my_base_type::rank4_il;
+    using typename my_base_type::runtime_view_type;
 
     /// Type of a buffer containing an Eigen tensor
-    using eigen_buffer_type = buffer::Eigen<FloatType, Rank>;
+    using eigen_buffer_type = buffer::Eigen<FloatType>;
 
     /// Type of a mutable reference to an object of type eigen_buffer_type
     using eigen_buffer_reference = eigen_buffer_type&;
@@ -64,15 +68,6 @@
     /// Type of a pointer to an eigen_buffer_type object
     using eigen_buffer_pointer = std::unique_ptr<eigen_buffer_type>;
 
-    /// Type of a layout which can be used to create an Eigen tensor
-    using eigen_layout_type = layout::Physical;
-
-    /// Type of a read-only reference to an object of type eigen_layout_type
-    using const_eigen_layout_reference = const eigen_layout_type&;
-
-    /// Type of a pointer to an eigen_layout_type object
-    using eigen_layout_pointer = std::unique_ptr<eigen_layout_type>;
-
     // Reuse base class's ctors
     using my_base_type::my_base_type;
@@ -91,67 +86,6 @@
      */
     explicit Eigen(runtime_view_type rv) : my_base_type(std::move(rv)) {}
 
-    /** @brief Copies @p layout and dispatches to other overload.
-     *
-     * The buffer resulting from an allocator owns its layout. This method is
-     * a convenience function for when the layout for the buffer has not been
-     * allocated yet.
-     *
-     * @param[in] layout The layout to copy for the resulting buffer.
-     *
-     * @return An uninitialized buffer containing a copy of @p layout.
-     *
-     * @throw std::bad_alloc if there is a problem allocating the copy or the
-     *                       resulting buffer. Strong throw guarantee.
-     * @throw std::runtime_error if the provided layout is not compatible with
-     *                           *this. See the primary method for more
-     *                           details.
-     */
-    eigen_buffer_pointer allocate(const_eigen_layout_reference layout) {
-        return allocate(std::make_unique<eigen_layout_type>(layout));
-    }
-
-    /** @brief Primary method for allocating Eigen-based buffers.
-     *
-     * This method simply checks that @p playout points to a layout compatible
-     * with the template parameters of *this and then creates a new Eigen
-     * tensor object. The elements of the Eigen tensor object are NOT
-     * initialized.
-     *
-     * @param[in] playout A pointer to the layout for the new buffer.
-     *
-     * @return An uninitialized buffer containing @p playout.
-     *
-     * @throw std::runtime_error if the provided layout does not have the same
-     *                           rank as @p Rank. Strong throw guarantee.
-     * @throw std::bad_alloc if there is a problem allocating the new buffer.
-     *                       Strong throw guarantee.
-     */
-    eigen_buffer_pointer allocate(eigen_layout_pointer playout);
-
-    /** @brief Allocates and initializes an Eigen buffer.
-     *
-     * @tparam LayoutType The type of @p layout. Must be a type such that
-     *                    `allocate(layout)` is a valid call.
-     *
-     * @param[in] layout The layout for the buffer.
-     * @param[in] value The value to initialize the tensor with.
-     *
-     * @return A buffer which is allocated and initialized.
-     *
-     * @throw std::runtime_error if the provided layout is not compatible with
-     *                           the template parameters of *this. Strong throw
-     *                           guarantee.
-     * @throw std::bad_alloc if there is a problem allocating the buffer.
-     *                       Strong throw guarantee.
-     */
-    template<typename LayoutType>
-    eigen_buffer_pointer construct(LayoutType&& layout, FloatType value) {
-        auto pbuffer = allocate(std::forward<LayoutType>(layout));
-        pbuffer->value().setConstant(value);
-        return pbuffer;
-    }
-
     /** @brief Determines if @p buffer can be rebound as an Eigen buffer.
      *
      * Rebinding a buffer allows the same memory to be viewed as a (possibly)
@@ -199,49 +133,6 @@ class Eigen : public Contiguous<FloatType> {
     static const_eigen_buffer_reference rebind(
       const_buffer_base_reference buffer);
 
-    /** @brief Is *this value equal to @p rhs?
-     *
-     * @tparam FloatType2 The numerical type @p rhs uses for its elements.
-     * @tparam Rank2 The rank of the tensors allocated by @p rhs.
-     *
-     * In addition to the definition of value equal stemming from the
-     * Replicated base class, two Eigen allocators are only value equal if they
-     * produce tensors with the same rank and numerical type.
-     *
-     * @param[in] rhs The allocator to compare to.
-     *
-     * @return True if *this is value equal to @p rhs and false otherwise.
-     *
-     * @throw None No throw guarantee.
-     */
-    template<typename FloatType2, unsigned short Rank2>
-    bool operator==(const Eigen<FloatType2, Rank2>& rhs) const noexcept {
-        if constexpr(!std::is_same_v<FloatType, FloatType2> || Rank != Rank2) {
-            return false;
-        } else {
-            return base_type::operator==(rhs);
-        }
-    }
-
-    /** @brief Is this allocator different from @p rhs?
-     *
-     * @tparam FloatType2 The type @p rhs uses for floating-point elements.
-     * @tparam Rank2 The rank of @p rhs
-     *
-     * This method defines "different" as "not value equal." See the
-     * documentation for operator== for the definition of value equal.
-     *
-     * @param[in] rhs The allocator to compare against.
-     *
-     * @return False if *this is value equal to @p rhs and true otherwise.
-     *
-     * @throw None No throw guarantee.
-     */
-    template<typename FloatType2, unsigned short Rank2>
-    bool operator!=(const Eigen<FloatType2, Rank2>& rhs) const noexcept {
-        return !(*this == rhs);
-    }
-
     static base_pointer make_eigen_allocator(unsigned int rank,
                                              runtime_view_type rv);
 
@@ -253,6 +144,15 @@
      */
     buffer_base_pointer allocate_(layout_pointer playout) override;
 
+    contiguous_pointer construct_(rank0_il il) override;
+    contiguous_pointer construct_(rank1_il il) override;
+    contiguous_pointer construct_(rank2_il il) override;
+    contiguous_pointer construct_(rank3_il il) override;
+    contiguous_pointer construct_(rank4_il il) override;
+
+    contiguous_pointer construct_(layout_pointer playout,
+                                  element_type value) override;
+
     /// Implements clone by calling copy ctor
     base_pointer clone_() const override {
         return std::make_unique<my_type>(*this);
@@ -260,34 +160,21 @@
 
     /// Implements are_equal, by deferring to the base's operator==
     bool are_equal_(const_base_reference rhs) const noexcept override {
-        return my_base_type::are_equal_impl_<my_type>(rhs);
+        return my_base_type::template are_equal_impl_<my_type>(rhs);
     }
+
+private:
+    template<typename ILType>
+    contiguous_pointer il_construct_(ILType il);
 };
 
 // -----------------------------------------------------------------------------
 // -- Explicit class template declarations
 // -----------------------------------------------------------------------------
 
-#define DECLARE_EIGEN_ALLOCATOR(TYPE)     \
-    extern template class Eigen<TYPE, 0>; \
-    extern template class Eigen<TYPE, 1>; \
-    extern template class Eigen<TYPE, 2>; \
-    extern template class Eigen<TYPE, 3>; \
-    extern template class Eigen<TYPE, 4>; \
-    extern template class Eigen<TYPE, 5>; \
-    extern template class Eigen<TYPE, 6>; \
-    extern template class Eigen<TYPE, 7>; \
-    extern template class Eigen<TYPE, 8>; \
-    extern template class Eigen<TYPE, 9>; \
-    extern template class Eigen<TYPE, 10>
-
-DECLARE_EIGEN_ALLOCATOR(float);
-DECLARE_EIGEN_ALLOCATOR(double);
+#define DECLARE_EIGEN_ALLOCATOR(TYPE) extern template class Eigen<TYPE>
 
-#ifdef ENABLE_SIGMA
-DECLARE_EIGEN_ALLOCATOR(sigma::UFloat);
-DECLARE_EIGEN_ALLOCATOR(sigma::UDouble);
-#endif
+TW_APPLY_FLOATING_POINT_TYPES(DECLARE_EIGEN_ALLOCATOR);
 
 #undef DECLARE_EIGEN_ALLOCATOR
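Reviewer sketch (not part of the diff): can_rebind/rebind recover the typed
Eigen buffer from a type-erased BufferBase reference. A minimal usage sketch,
assuming an allocator::Eigen<double> object `alloc` and a
buffer::BufferBase& named `buf` (hypothetical names):

    if(alloc.can_rebind(buf)) {
        auto& ebuf = alloc.rebind(buf); // now a buffer::Eigen<double>&
    }
    // rebind throws std::runtime_error when the buffer cannot be rebound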
diff --git a/include/tensorwrapper/buffer/buffer_base.hpp b/include/tensorwrapper/buffer/buffer_base.hpp
index cedd2800..a1dd2801 100644
--- a/include/tensorwrapper/buffer/buffer_base.hpp
+++ b/include/tensorwrapper/buffer/buffer_base.hpp
@@ -15,10 +15,12 @@
  */
 
 #pragma once
+#include
 #include
 #include
 #include
 #include
+#include
 
 namespace tensorwrapper::buffer {
@@ -32,36 +34,51 @@ class BufferBase : public detail_::PolymorphicBase,
     /// Type of *this
     using my_type = BufferBase;
 
+    /// Traits of my_type
+    using my_traits = types::ClassTraits<my_type>;
+
 protected:
     /// Type *this inherits from
     using polymorphic_base = detail_::PolymorphicBase;
 
 public:
     /// Type all buffers inherit from
-    using buffer_base_type = typename polymorphic_base::base_type;
+    using buffer_base_type = typename my_traits::buffer_base_type;
 
-    /// Type of a mutable reference to a buffer_base_type object
-    using buffer_base_reference = typename polymorphic_base::base_reference;
+    /// Type of a reference to an object of type buffer_base_type
+    using buffer_base_reference = typename my_traits::buffer_base_reference;
 
-    /// Type of a read-only reference to a buffer_base_type object
+    /// Type of a reference to a read-only object of type buffer_base_type
     using const_buffer_base_reference =
-      typename polymorphic_base::const_base_reference;
+      typename my_traits::const_buffer_base_reference;
 
     /// Type of a pointer to an object of type buffer_base_type
-    using buffer_base_pointer = typename polymorphic_base::base_pointer;
+    using buffer_base_pointer = typename my_traits::buffer_base_pointer;
 
     /// Type of a pointer to a read-only object of type buffer_base_type
     using const_buffer_base_pointer =
-      typename polymorphic_base::const_base_pointer;
+      typename my_traits::const_buffer_base_pointer;
 
     /// Type of the class describing the physical layout of the buffer
-    using layout_type = layout::LayoutBase;
+    using layout_type = layout::Physical;
 
     /// Type of a read-only reference to a layout
     using const_layout_reference = const layout_type&;
 
     /// Type of a pointer to the layout
-    using layout_pointer = typename layout_type::layout_pointer;
+    using layout_pointer = std::unique_ptr<layout_type>;
+
+    /// Type all allocators inherit from
+    using allocator_base_type = allocator::AllocatorBase;
+
+    /// Type of a pointer to an allocator_base_type object
+    using allocator_base_pointer = std::unique_ptr<allocator_base_type>;
+
+    /// Type of a mutable reference to an allocator_base_type
+    using allocator_base_reference = allocator_base_type&;
+
+    /// Type of a read-only reference to an allocator_base_type
+    using const_allocator_reference = const allocator_base_type&;
 
     /// Type used to represent the tensor's rank
     using rank_type = typename layout_type::size_type;
@@ -81,6 +98,18 @@
      */
     bool has_layout() const noexcept { return static_cast<bool>(m_layout_); }
 
+    /** @brief Does *this have an allocator?
+     *
+     * Default constructed or moved from BufferBase objects will not have
+     * allocators. This method is used to determine if *this has an allocator
+     * or not.
+     *
+     * @throw None No throw guarantee.
+     */
+    bool has_allocator() const noexcept {
+        return static_cast<bool>(m_allocator_);
+    }
+
     /** @brief Retrieves the layout of *this.
      *
      * This method can be used to retrieve the layout associated with *this,
@@ -97,6 +126,38 @@
         return *m_layout_;
     }
 
+    /** @brief Retrieves the allocator of *this.
+     *
+     * This method can be used to retrieve the allocator used to allocate
+     * *this, assuming *this was provided an allocator. See has_allocator for
+     * determining if *this has an allocator or not.
+     *
+     * @return A mutable reference to the allocator.
+     *
+     * @throw std::runtime_error if *this does not have an allocator. Strong
+     *                           throw guarantee.
+     */
+    allocator_base_reference allocator() {
+        assert_allocator_();
+        return *m_allocator_;
+    }
+
+    /** @brief Retrieves the allocator of *this.
+     *
+     * This method can be used to retrieve the allocator used to allocate
+     * *this, assuming *this was provided an allocator. See has_allocator for
+     * determining if *this has an allocator or not.
+     *
+     * @return A read-only reference to the allocator.
+     *
+     * @throw std::runtime_error if *this does not have an allocator. Strong
+     *                           throw guarantee.
+     */
+    const_allocator_reference allocator() const {
+        assert_allocator_();
+        return *m_allocator_;
+    }
+
     rank_type rank() const noexcept {
         return has_layout() ? layout().rank() : 0;
     }
@@ -119,8 +180,12 @@
      */
     bool operator==(const BufferBase& rhs) const noexcept {
         if(has_layout() != rhs.has_layout()) return false;
-        if(!has_layout()) return true;
-        return m_layout_->are_equal(*rhs.m_layout_);
+        if(has_allocator() != rhs.has_allocator()) return false;
+        if(has_layout() && m_layout_->are_different(*rhs.m_layout_))
+            return false;
+        if(has_allocator() && m_allocator_->are_different(*rhs.m_allocator_))
+            return false;
+        return true;
     }
 
     /** @brief Is *this different from @p rhs?
@@ -152,7 +217,7 @@
      *
      * @throw None No throw guarantee.
      */
-    BufferBase() : BufferBase(nullptr) {}
+    BufferBase() : BufferBase(nullptr, nullptr) {}
 
     /** @brief Creates a buffer initialized with a copy of @p layout.
      *
      * @throw std::bad_alloc if there is a problem allocating the copy of
      *                       @p layout. Strong throw guarantee.
      */
-    explicit BufferBase(const_layout_reference layout) :
-      BufferBase(layout.clone()) {}
+    explicit BufferBase(const_layout_reference layout,
+                        const_allocator_reference allocator) :
+      BufferBase(layout.clone_as<layout_type>(), allocator.clone()) {}
 
     /** @brief Creates a buffer which owns the layout pointed to by @p playout.
      *
      *
      * @throw None No throw guarantee.
      */
-    explicit BufferBase(layout_pointer playout) noexcept :
-      m_layout_(std::move(playout)) {}
+    explicit BufferBase(layout_pointer playout,
+                        allocator_base_pointer pallocator) noexcept :
+      m_layout_(std::move(playout)), m_allocator_(std::move(pallocator)) {}
 
     /** @brief Creates a buffer by deep copying @p other.
      *
      * @throw std::bad_alloc if there is a problem copying the state of
      *                       @p other. Strong throw guarantee.
      */
     BufferBase(const BufferBase& other) :
-      m_layout_(other.m_layout_ ? other.m_layout_->clone() : nullptr) {}
+      m_layout_(other.m_layout_ ? other.m_layout_->clone_as<layout_type>() :
+                                  nullptr),
+      m_allocator_(other.m_allocator_ ? other.m_allocator_->clone() : nullptr) {
+    }
 
     /** @brief Replaces the state in *this with a deep copy of the state in
      *         @p rhs.
      *
      */
     BufferBase& operator=(const BufferBase& rhs) {
         if(this != &rhs) {
-            auto temp = rhs.has_layout() ? rhs.m_layout_->clone() : nullptr;
-            temp.swap(m_layout_);
+            auto temp_layout = rhs.has_layout() ?
+                                 rhs.m_layout_->clone_as<layout_type>() :
+                                 nullptr;
+            auto temp_allocator =
+              rhs.has_allocator() ? rhs.m_allocator_->clone() : nullptr;
+            temp_layout.swap(m_layout_);
+            temp_allocator.swap(m_allocator_);
         }
         return *this;
     }
@@ -218,6 +293,11 @@
                                       const_labeled_reference rhs) override;
 
 private:
+    template<typename FxnType>
+    dsl_reference binary_op_common_(FxnType&& fxn, label_type this_labels,
+                                    const_labeled_reference lhs,
+                                    const_labeled_reference rhs);
+
     /// Throws std::runtime_error when there is no layout
     void assert_layout_() const {
         if(has_layout()) return;
         throw std::runtime_error(
           "Buffer has no layout. Was it default initialized?");
     }
 
+    /// Throws std::runtime_error when there is no allocator
+    void assert_allocator_() const {
+        if(has_allocator()) return;
+        throw std::runtime_error(
+          "Buffer has no allocator. Was it default initialized?");
+    }
+
     /// The layout of *this
     layout_pointer m_layout_;
+
+    /// The allocator of *this
+    allocator_base_pointer m_allocator_;
 };
 
 } // namespace tensorwrapper::buffer
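Reviewer note (not part of the diff): BufferBase now owns an allocator next to
its layout, and operator== compares both polymorphically via are_different. A
minimal sketch of the new accessors, assuming a fully constructed buffer `buf`
(hypothetical name):

    if(buf.has_allocator()) {
        auto& alloc  = buf.allocator(); // throws std::runtime_error if absent
        auto  pclone = alloc.clone();   // polymorphic copy of the allocator
    }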
Was it default initialized?"); + } + /// The layout of *this layout_pointer m_layout_; + + /// The allocator of *this + allocator_base_pointer m_allocator_; }; } // namespace tensorwrapper::buffer diff --git a/include/tensorwrapper/buffer/buffer_fwd.hpp b/include/tensorwrapper/buffer/buffer_fwd.hpp index 51902238..063a9813 100644 --- a/include/tensorwrapper/buffer/buffer_fwd.hpp +++ b/include/tensorwrapper/buffer/buffer_fwd.hpp @@ -20,7 +20,10 @@ namespace tensorwrapper::buffer { class BufferBase; -template +template +class Contiguous; + +template class Eigen; class Local; diff --git a/include/tensorwrapper/buffer/contiguous.hpp b/include/tensorwrapper/buffer/contiguous.hpp index 37094750..84423565 100644 --- a/include/tensorwrapper/buffer/contiguous.hpp +++ b/include/tensorwrapper/buffer/contiguous.hpp @@ -16,6 +16,7 @@ #pragma once #include +#include namespace tensorwrapper::buffer { @@ -39,6 +40,12 @@ class Contiguous : public Replicated { /// Type of each element using element_type = FloatType; + /// Type of a mutable reference to an object of type element_type + using reference = element_type&; + + /// Type of a read-only reference to an object of type element_type + using const_reference = const element_type&; + /// Type of a pointer to a mutable element_type object using pointer = element_type*; @@ -48,6 +55,9 @@ class Contiguous : public Replicated { /// Type used for offsets and indexing using size_type = std::size_t; + /// Type of a multi-dimensional index + using index_vector = std::vector; + // Pull in base's ctors using my_base_type::my_base_type; @@ -60,6 +70,56 @@ class Contiguous : public Replicated { /// Returns a read-only pointer to the first element in contiguous memory const_pointer data() const noexcept { return data_(); } + /** @brief Retrieves a tensor element by offset. + * + * @tparam Args The types of each offset. Must decay to integral types. + * + * @param[in] args The offsets such that the i-th value in @p args is the + * offset of the element along the i-th mode of the tensor. + * + * @return A mutable reference to the element. + * + * @throw std::runtime_error if the number of indices does not match the + * rank of the tensor. Strong throw guarantee. + */ + template + reference at(Args&&... args) { + static_assert( + std::conjunction_v>...>, + "Offsets must be integral types"); + if(sizeof...(Args) != this->rank()) + throw std::runtime_error("Number of offsets must match rank"); + return get_elem_( + index_vector{detail_::to_size_t(std::forward(args))...}); + } + + /** @brief Retrieves a tensor element by offset. + * + * @tparam Args The types of each offset. Must decay to integral types. + * + * This method is the same as the non-const version except that the result + * is read-only. See the documentation for the mutable version for more + * details. + * + * @param[in] args The offsets such that the i-th value in @p args is the + * offset of the element along the i-th mode of the tensor. + * + * @return A read-only reference to the element. + * + * @throw std::runtime_error if the number of indices does not match the + * rank of the tensor. Strong throw guarantee. + */ + template + const_reference at(Args&&... 
+    const_reference at(Args&&... args) const {
+        static_assert(
+          std::conjunction_v<std::is_integral<std::decay_t<Args>>...>,
+          "Offsets must be integral types");
+        if(sizeof...(Args) != this->rank())
+            throw std::runtime_error("Number of offsets must match rank");
+        return get_elem_(
+          index_vector{detail_::to_size_t(std::forward<Args>(args))...});
+    }
+
 protected:
     /// Derived class can override if it likes
     virtual size_type size_() const noexcept { return layout().shape().size(); }
@@ -69,6 +129,12 @@
 
     /// Derived class should implement according to data() const description
     virtual const_pointer data_() const noexcept = 0;
+
+    /// Derived class should implement according to operator()()
+    virtual reference get_elem_(index_vector index) = 0;
+
+    /// Derived class should implement according to operator()() const
+    virtual const_reference get_elem_(index_vector index) const = 0;
 };
 
 } // namespace tensorwrapper::buffer
\ No newline at end of file
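Reviewer sketch (not part of the diff): at() provides rank-checked element
access through the type-erased Contiguous interface; each offset is funneled
through detail_::to_size_t. A minimal usage sketch, assuming a rank-2
buffer::Contiguous<double>& named `buf` (hypothetical name):

    buf.at(0, 1) = 2.5;           // mutable access
    const auto& cbuf = buf;
    double x = cbuf.at(0, 1);     // read-only access
    // buf.at(0) throws std::runtime_error: offset count must equal the rank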
* @@ -78,9 +84,13 @@ class Eigen : public Contiguous { * @throw std::bad_alloc if there is a problem copying @p layout. Strong * throw guarantee. */ - template - Eigen(DataType&& t, const_layout_reference layout) : - my_base_type(layout), m_tensor_(std::forward(t)) {} + Eigen(pimpl_pointer pimpl, const_layout_reference layout, + const_allocator_reference allocator) : + Eigen(std::move(pimpl), layout.template clone_as(), + allocator.clone()) {} + + Eigen(pimpl_pointer pimpl, layout_pointer playout, + allocator_base_pointer pallocator); /** @brief Initializes *this with a copy of @p other. * @@ -89,7 +99,7 @@ class Eigen : public Contiguous { * @throw std::bad_alloc if there is a problem allocating the copy. Strong * throw guarantee. */ - Eigen(const Eigen& other) = default; + Eigen(const Eigen& other); /** @brief Initializes *this with the state from @p other. * @@ -99,7 +109,7 @@ class Eigen : public Contiguous { * * @throw None No throw guarantee. */ - Eigen(Eigen&& other) = default; + Eigen(Eigen&& other) noexcept; /** @brief Replaces the state in *this with a copy of the state in @p rhs. * @@ -110,7 +120,7 @@ class Eigen : public Contiguous { * @throw std::bad_alloc if the copy fails to allocate memory. Strong * throw guarantee. */ - Eigen& operator=(const Eigen& rhs) = default; + Eigen& operator=(const Eigen& rhs); /** @brief Replaces the state in *this with the state in @p rhs. * @@ -122,32 +132,22 @@ class Eigen : public Contiguous { * * @throw None No throw guarantee. */ - Eigen& operator=(Eigen&& rhs) = default; + Eigen& operator=(Eigen&& rhs) noexcept; + + /// Defaulted no throw dtor + ~Eigen() noexcept; // ------------------------------------------------------------------------- - // -- State accessors + // -- Utility methods // ------------------------------------------------------------------------- - /** @brief Allows direct access to the wrapped tensor. - * - * - * @return A mutable reference to the wrapped tensor. + /** @brief Exchanges the contents of *this with @p other. * - * @throw None No throw guarantee - */ - auto& value() { return m_tensor_; } - - /** @brief Provides direct access to the wrapped tensor. - * - * @return A read-only reference to the wrapped tensor. + * @param[in,out] other The buffer to swap state with. * * @throw None No throw guarantee. */ - const auto& value() const { return m_tensor_; } - - // ------------------------------------------------------------------------- - // -- Utility methods - // ------------------------------------------------------------------------- + void swap(Eigen& other) noexcept; /** @brief Is *this value equal to @p rhs? * @@ -164,11 +164,7 @@ class Eigen : public Contiguous { * * @throw None No throw guarantee. */ - bool operator==(const Eigen& rhs) const noexcept { - if(my_base_type::operator!=(rhs)) return false; - eigen::data_type r = (m_tensor_ - rhs.m_tensor_).sum(); - return r() == FloatType(0.0); - } + bool operator==(const Eigen& rhs) const noexcept; /** @brief Is *this different from @p rhs? 
      *
@@ -185,14 +181,10 @@
 protected:
     /// Implements clone by calling copy ctor
-    buffer_base_pointer clone_() const override {
-        return std::make_unique<my_type>(*this);
-    }
+    buffer_base_pointer clone_() const override;
 
     /// Implements are_equal by calling are_equal_impl_
-    bool are_equal_(const_buffer_base_reference rhs) const noexcept override {
-        return my_base_type::template are_equal_impl_<my_type>(rhs);
-    }
+    bool are_equal_(const_buffer_base_reference rhs) const noexcept override;
 
     /// Implements addition_assignment by calling addition_assignment on state
     dsl_reference addition_assignment_(label_type this_labels,
                                        const_labeled_reference lhs,
                                        const_labeled_reference rhs) override;
@@ -213,48 +205,45 @@
     dsl_reference permute_assignment_(label_type this_labels,
                                       const_labeled_reference rhs) override;
 
+    /// Scales *this by @p scalar
     dsl_reference scalar_multiplication_(label_type this_labels, double scalar,
                                          const_labeled_reference rhs) override;
 
-    pointer data_() noexcept override { return m_tensor_.data(); }
+    /// Implements getting the raw pointer
+    pointer data_() noexcept override;
 
-    const_pointer data_() const noexcept override { return m_tensor_.data(); }
+    /// Implements getting the raw pointer (read-only)
+    const_pointer data_() const noexcept override;
+
+    /// Implements mutable element access
+    reference get_elem_(index_vector index) override;
+
+    /// Implements read-only element access
+    const_reference get_elem_(index_vector index) const override;
 
     /// Implements to_string
     typename polymorphic_base::string_type to_string_() const override;
 
 private:
-    dsl_reference hadamard_(label_type this_labels, const_labeled_reference lhs,
-                            const_labeled_reference rhs);
+    /// True if *this has a PIMPL
+    bool has_pimpl_() const noexcept;
+
+    /// Throws std::runtime_error if *this has no PIMPL
+    void assert_pimpl_() const;
 
-    dsl_reference contraction_(label_type this_labels,
-                               const_labeled_reference lhs,
-                               const_labeled_reference rhs);
+    /// Asserts *this has a PIMPL then returns it
+    pimpl_reference pimpl_();
 
-    /// The actual Eigen tensor
-    data_type m_tensor_;
+    /// Asserts *this has a PIMPL then returns it
+    const_pimpl_reference pimpl_() const;
+
+    /// The object actually implementing *this
+    pimpl_pointer m_pimpl_;
 };
 
-#define DECLARE_EIGEN_BUFFER(TYPE)        \
-    extern template class Eigen<TYPE, 0>; \
-    extern template class Eigen<TYPE, 1>; \
-    extern template class Eigen<TYPE, 2>; \
-    extern template class Eigen<TYPE, 3>; \
-    extern template class Eigen<TYPE, 4>; \
-    extern template class Eigen<TYPE, 5>; \
-    extern template class Eigen<TYPE, 6>; \
-    extern template class Eigen<TYPE, 7>; \
-    extern template class Eigen<TYPE, 8>; \
-    extern template class Eigen<TYPE, 9>; \
-    extern template class Eigen<TYPE, 10>
-
-DECLARE_EIGEN_BUFFER(float);
-DECLARE_EIGEN_BUFFER(double);
-
-#ifdef ENABLE_SIGMA
-DECLARE_EIGEN_BUFFER(sigma::UFloat);
-DECLARE_EIGEN_BUFFER(sigma::UDouble);
-#endif
+#define DECLARE_EIGEN_BUFFER(TYPE) extern template class Eigen<TYPE>
+
+TW_APPLY_FLOATING_POINT_TYPES(DECLARE_EIGEN_BUFFER);
 
 #undef DECLARE_EIGEN_BUFFER
diff --git a/include/tensorwrapper/detail_/integer_utilities.hpp b/include/tensorwrapper/detail_/integer_utilities.hpp
new file mode 100644
index 00000000..88dbc0bf
--- /dev/null
+++ b/include/tensorwrapper/detail_/integer_utilities.hpp
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2025 NWChemEx-Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+#include
+#include
+
+namespace tensorwrapper::detail_ {
+
+/** @brief Safely converts objects to std::size_t.
+ *
+ * @tparam T The integral type of the input.
+ *
+ * This function will ensure at compile time that @p T is an integral type
+ * and will assert that it is greater than or equal to 0 at runtime.
+ *
+ * @note assert is used, instead of throw, so that the overhead for the checks
+ *       can be disabled in Release mode. Given that this function is used in
+ *       the getting/setting of tensor elements by offsets, its overhead could
+ *       conceivably add up.
+ *
+ * @param[in] i The integer to convert to `std::size_t`
+ *
+ * @return @p i cast to a `std::size_t`
+ */
+template<typename T>
+std::size_t to_size_t(T i) {
+    static_assert(std::is_integral_v<std::decay_t<T>>);
+    assert(i >= 0);
+    return i;
+}
+
+/** @brief Safely converts integral objects to long.
+ *
+ * @tparam T The type of @p i. Must be an integral type.
+ *
+ * @param[in] i The integer we are converting.
+ *
+ * @note See the note on to_size_t for details on bounds checking.
+ *
+ * @return @p i cast to a `long`.
+ */
+template<typename T>
+long to_long(T i) {
+    static_assert(std::is_integral_v<std::decay_t<T>>);
+    if constexpr(std::is_same_v<std::decay_t<T>, std::size_t>) {
+        assert(i < std::numeric_limits<long>::max());
+    }
+    return i;
+}
+
+} // namespace tensorwrapper::detail_
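Reviewer sketch (not part of the diff): to_size_t/to_long centralize the
signed/unsigned conversions used when indexing elements; because the checks are
asserts, they compile away in Release builds. A minimal usage sketch
(hypothetical values):

    int i = 3;
    auto offset = tensorwrapper::detail_::to_size_t(i);    // std::size_t{3}
    auto extent = tensorwrapper::detail_::to_long(offset); // long{3}
    // to_size_t(-1) trips the assert in Debug builds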
diff --git a/include/tensorwrapper/layout/layout_base.hpp b/include/tensorwrapper/layout/layout_base.hpp
index 7ac8f6ae..83f1bd6e 100644
--- a/include/tensorwrapper/layout/layout_base.hpp
+++ b/include/tensorwrapper/layout/layout_base.hpp
@@ -15,6 +15,7 @@
  */
 
 #pragma once
+#include
 #include
 #include
 #include
@@ -75,6 +76,8 @@ class LayoutBase : public tensorwrapper::detail_::PolymorphicBase,
     // -- Ctors and dtor
     // -------------------------------------------------------------------------
 
+    LayoutBase() = default;
+
     /** @brief Initialize by copy ctor
      *
      * This ctor is used when the user does not want to relinquish ownership of
@@ -156,7 +159,10 @@
      *
      * @throw None No throw guarantee
      */
-    const_shape_reference shape() const { return *m_shape_; }
+    const_shape_reference shape() const {
+        assert(m_shape_);
+        return *m_shape_;
+    }
 
     /** @brief Provides read-only access to the symmetry of the layout.
      *
      * @throw None No throw guarantee
      */
-    const_symmetry_reference symmetry() const { return *m_symmetry_; }
+    const_symmetry_reference symmetry() const {
+        assert(m_symmetry_);
+        return *m_symmetry_;
+    }
 
     /** @brief Provides access to the sparsity of the layout.
      *
      * @throw None No throw guarantee.
      */
-    const_sparsity_reference sparsity() const { return *m_sparsity_; }
+    const_sparsity_reference sparsity() const {
+        assert(m_sparsity_);
+        return *m_sparsity_;
+    }
 
     /** @brief The rank of the tensor this layout describes.
      *
      * @throw None No throw guarantee.
      */
-    size_type rank() const noexcept { return m_shape_->rank(); }
+    size_type rank() const noexcept { return m_shape_ ? m_shape_->rank() : 0; }
 
     // -------------------------------------------------------------------------
     // -- Utility methods
@@ -265,6 +277,12 @@
                                       const_labeled_reference rhs) override;
 
 private:
+    /// Factorized implementation for binary operations
+    template<typename FxnType>
+    dsl_reference binary_common_(FxnType&& fxn, label_type this_labels,
+                                 const_labeled_reference lhs,
+                                 const_labeled_reference rhs);
+
     /// Asserts that *this is in a valid state
     void assert_valid_state_() const;
diff --git a/include/tensorwrapper/layout/logical.hpp b/include/tensorwrapper/layout/logical.hpp
index 38826de5..10feaccb 100644
--- a/include/tensorwrapper/layout/logical.hpp
+++ b/include/tensorwrapper/layout/logical.hpp
@@ -40,6 +40,8 @@ class Logical : public LayoutBase {
     using my_base_type::sparsity_pointer;
     using my_base_type::symmetry_pointer;
 
+    Logical() = default;
+
     Logical(const_shape_reference shape, const_symmetry_reference symmetry,
             const_sparsity_reference sparsity) :
       my_base_type(shape, symmetry, sparsity) {}
diff --git a/include/tensorwrapper/layout/physical.hpp b/include/tensorwrapper/layout/physical.hpp
index c7e2aeb6..cc73890a 100644
--- a/include/tensorwrapper/layout/physical.hpp
+++ b/include/tensorwrapper/layout/physical.hpp
@@ -39,6 +39,8 @@ class Physical : public LayoutBase {
     using my_base_type::layout_pointer;
     using my_base_type::size_type;
 
+    Physical() = default;
+
     Physical(const_shape_reference shape, const_symmetry_reference symmetry,
             const_sparsity_reference sparsity) :
       my_base_type(shape, symmetry, sparsity) {}
diff --git a/include/tensorwrapper/operations/approximately_equal.hpp b/include/tensorwrapper/operations/approximately_equal.hpp
new file mode 100644
index 00000000..da989855
--- /dev/null
+++ b/include/tensorwrapper/operations/approximately_equal.hpp
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2025 NWChemEx-Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+#include
+
+namespace tensorwrapper::operations {
+
+/** @brief Determines if @p lhs equals @p rhs to within @p tol.
+ *
+ * Two tensors are approximately equal if:
+ * - They are the same rank.
+ * - For each element in @p lhs the corresponding element in @p rhs differs
+ *   from it by less than @p tol.
+ *
+ * @param[in] lhs The first tensor being compared.
+ * @param[in] rhs The second tensor being compared.
+ * @param[in] tol The absolute tolerance for establishing equality. Two
+ *                elements that differ by less than @p tol are equal.
+ *
+ * @return True if @p lhs is approximately equal to @p rhs and false otherwise.
+ *
+ * @throw std::runtime_error if the tensors do not contain doubles. Strong
+ *                           throw guarantee.
+ *
+ */
+bool approximately_equal(const Tensor& lhs, const Tensor& rhs,
+                         double tol = 1e-16);
+
+} // namespace tensorwrapper::operations
\ No newline at end of file
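Reviewer sketch (not part of the diff): usage of the new free function,
assuming two Tensor objects `a` and `b` that hold doubles (hypothetical names):

    using tensorwrapper::operations::approximately_equal;
    if(approximately_equal(a, b, 1e-10)) {
        // each element of a is within 1e-10 of the matching element of b
    }
    // throws std::runtime_error if either tensor does not contain doubles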
diff --git a/include/tensorwrapper/operations/operations.hpp b/include/tensorwrapper/operations/operations.hpp
new file mode 100644
index 00000000..8cba2b83
--- /dev/null
+++ b/include/tensorwrapper/operations/operations.hpp
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2025 NWChemEx-Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+#include
+
+/// Namespace for free functions that act on tensors
+namespace tensorwrapper::operations {}
\ No newline at end of file
diff --git a/include/tensorwrapper/tensor/detail_/tensor_input.hpp b/include/tensorwrapper/tensor/detail_/tensor_input.hpp
index 825ce648..744e6437 100644
--- a/include/tensorwrapper/tensor/detail_/tensor_input.hpp
+++ b/include/tensorwrapper/tensor/detail_/tensor_input.hpp
@@ -18,6 +18,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
diff --git a/include/tensorwrapper/tensor/tensor_class.hpp b/include/tensorwrapper/tensor/tensor_class.hpp
index 57c2836f..a4ec0655 100644
--- a/include/tensorwrapper/tensor/tensor_class.hpp
+++ b/include/tensorwrapper/tensor/tensor_class.hpp
@@ -402,6 +402,12 @@ class Tensor : public detail_::DSLBase,
     typename polymorphic_base::string_type to_string_() const override;
 
 private:
+    /// Code factorization for implementing binary operations
+    template<typename FxnType>
+    dsl_reference binary_common_(FxnType&& fxn, label_type this_labels,
+                                 const_labeled_reference lhs,
+                                 const_labeled_reference rhs);
+
     /// All ctors ultimately dispatch to this ctor
     Tensor(pimpl_pointer pimpl) noexcept;
diff --git a/include/tensorwrapper/tensorwrapper.hpp b/include/tensorwrapper/tensorwrapper.hpp
index 864aa32c..eeae2067 100644
--- a/include/tensorwrapper/tensorwrapper.hpp
+++ b/include/tensorwrapper/tensorwrapper.hpp
@@ -16,15 +16,16 @@
 
 #pragma once
 #include
-#include
 #include
 #include
 #include
 #include
+#include
 #include
 #include
 #include
 #include
+#include
 
 /** @brief Contains the components of the TensorWrapper library. */
 namespace tensorwrapper {}
diff --git a/include/tensorwrapper/types/buffer_traits.hpp b/include/tensorwrapper/types/buffer_traits.hpp
new file mode 100644
index 00000000..6df3cdb4
--- /dev/null
+++ b/include/tensorwrapper/types/buffer_traits.hpp
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2025 NWChemEx-Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+#include
+#include
+#include
+
+namespace tensorwrapper::types {
+
+template<>
+struct ClassTraits<buffer::BufferBase> {
+    /// Type all buffers inherit from
+    using buffer_base_type = buffer::BufferBase;
+
+    /// Type of a mutable reference to a buffer_base_type object
+    using buffer_base_reference = buffer_base_type&;
+
+    /// Type of a read-only reference to a buffer_base_type object
+    using const_buffer_base_reference = const buffer_base_type&;
+
+    /// Type of a unique_ptr to a mutable buffer_base_type
+    using buffer_base_pointer = std::unique_ptr<buffer_base_type>;
+
+    /// Type of a unique_ptr to a read-only buffer_base_type
+    using const_buffer_base_pointer = std::unique_ptr<const buffer_base_type>;
+};
+
+} // namespace tensorwrapper::types
\ No newline at end of file
diff --git a/include/tensorwrapper/types/class_traits.hpp b/include/tensorwrapper/types/class_traits.hpp
new file mode 100644
index 00000000..2b6dfcfd
--- /dev/null
+++ b/include/tensorwrapper/types/class_traits.hpp
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2025 NWChemEx-Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+namespace tensorwrapper::types {
+
+/** @brief Defines the member types for the @p ClassType class.
+ *
+ * This class will serve as the single source of truth for defining the member
+ * types for the @p ClassType class. The primary template is not defined and
+ * developers are expected to specialize the template for each @p ClassType
+ * in the TensorWrapper library.
+ *
+ * @tparam ClassType The, possibly cv-qualified, type of the class which *this
+ *                   defines the types for.
+ */
+template<typename ClassType>
+struct ClassTraits;
+
+} // namespace tensorwrapper::types
\ No newline at end of file
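Reviewer sketch (not part of the diff): ClassTraits is meant to be the single
source of truth for a class's member types, so generic code can read types off
the specialization instead of the class itself. A hypothetical consumer:

    template<typename T>
    using base_pointer_t =
      typename tensorwrapper::types::ClassTraits<T>::buffer_base_pointer;

    // base_pointer_t<tensorwrapper::buffer::BufferBase> resolves to
    // std::unique_ptr<tensorwrapper::buffer::BufferBase>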
diff --git a/include/tensorwrapper/types/floating_point.hpp b/include/tensorwrapper/types/floating_point.hpp
new file mode 100644
index 00000000..0d273333
--- /dev/null
+++ b/include/tensorwrapper/types/floating_point.hpp
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2025 NWChemEx-Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+#include
+#ifdef ENABLE_SIGMA
+#include
+#endif
+
+namespace tensorwrapper::types {
+
+#ifdef ENABLE_SIGMA
+using ufloat  = sigma::UFloat;
+using udouble = sigma::UDouble;
+
+using floating_point_types = std::tuple<float, double, ufloat, udouble>;
+
+#define TW_APPLY_FLOATING_POINT_TYPES(MACRO_IN) \
+    MACRO_IN(float);                            \
+    MACRO_IN(double);                           \
+    MACRO_IN(types::ufloat);                    \
+    MACRO_IN(types::udouble)
+
+#else
+using ufloat  = float;
+using udouble = double;
+
+using floating_point_types = std::tuple<float, double>;
+
+#define TW_APPLY_FLOATING_POINT_TYPES(MACRO_IN) \
+    MACRO_IN(float);                            \
+    MACRO_IN(double)
+
+#endif
+
+} // namespace tensorwrapper::types
\ No newline at end of file
diff --git a/include/tensorwrapper/types/il_traits.hpp b/include/tensorwrapper/types/il_traits.hpp
new file mode 100644
index 00000000..d0485af1
--- /dev/null
+++ b/include/tensorwrapper/types/il_traits.hpp
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2025 NWChemEx-Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+#include
+
+namespace tensorwrapper::types {
+
+template<typename FloatType, unsigned short Rank>
+struct ILTraits;
+
+template<typename FloatType>
+struct ILTraits<FloatType, 0> {
+    using type = FloatType;
+};
+
+template<typename FloatType, unsigned short Rank>
+struct ILTraits {
+    using type =
+      std::initializer_list<typename ILTraits<FloatType, Rank - 1>::type>;
+};
+
+} // namespace tensorwrapper::types
\ No newline at end of file
diff --git a/include/tensorwrapper/types/types.hpp b/include/tensorwrapper/types/types.hpp
new file mode 100644
index 00000000..37b19a7e
--- /dev/null
+++ b/include/tensorwrapper/types/types.hpp
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2025 NWChemEx-Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+#include
+#include
+#include
+#include
+
+/// Namespace for defining types used throughout the TensorWrapper library
+namespace tensorwrapper::types {}
\ No newline at end of file
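Reviewer sketch (not part of the diff): ILTraits builds the nested
initializer-list type for a given rank by recursion; as reconstructed above,
each rank wraps the previous rank's type in std::initializer_list. For example:

    // rank 0: double
    // rank 1: std::initializer_list<double>
    // rank 2: std::initializer_list<std::initializer_list<double>>
    using rank2_il = tensorwrapper::types::ILTraits<double, 2>::type;
    rank2_il il{{1.0, 2.0}, {3.0, 4.0}};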
diff --git a/src/tensorwrapper/allocator/allocator_base.cpp b/src/tensorwrapper/allocator/allocator_base.cpp
new file mode 100644
index 00000000..049b76d2
--- /dev/null
+++ b/src/tensorwrapper/allocator/allocator_base.cpp
@@ -0,0 +1,15 @@
+/*
+ * Copyright 2025 NWChemEx-Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
diff --git a/src/tensorwrapper/allocator/eigen.cpp b/src/tensorwrapper/allocator/eigen.cpp
index 60905765..a97f182c 100644
--- a/src/tensorwrapper/allocator/eigen.cpp
+++ b/src/tensorwrapper/allocator/eigen.cpp
@@ -14,41 +14,21 @@
 * limitations under the License.
 */
 
+#include "../buffer/detail_/eigen_tensor.hpp"
+#include "../tensor/detail_/il_utils.hpp"
 #include
 #include
 #include
 #include
 
 namespace tensorwrapper::allocator {
-namespace {
-template<typename EigenTensorType, typename ShapeType, std::size_t... Is>
-auto unwrap_shape(const ShapeType& shape, std::index_sequence<Is...>) {
-    // XXX: This is a hack until we have a general Shape API in place
-    auto const_shape = static_cast<const shape::Smooth&>(shape);
-    return EigenTensorType(const_shape.extent(Is)...);
-}
-
-} // namespace
-#define TPARAMS template<typename FloatType, unsigned short Rank>
-#define EIGEN Eigen<FloatType, Rank>
-
-TPARAMS
-typename EIGEN::eigen_buffer_pointer EIGEN::allocate(
-  eigen_layout_pointer playout) {
-    using eigen_data_type = typename eigen_buffer_type::data_type;
-    if(playout->shape().rank() != Rank)
-        throw std::runtime_error("Rank of the layout is not compatible");
-
-    return std::make_unique<eigen_buffer_type>(
-      unwrap_shape<eigen_data_type>(playout->shape(),
-                                    std::make_index_sequence<Rank>()),
-      *playout);
-}
+#define TPARAMS template<typename FloatType>
+#define EIGEN Eigen<FloatType>
 
 TPARAMS
 bool EIGEN::can_rebind(const_buffer_base_reference buffer) {
-    auto pbuffer = dynamic_cast<const eigen_buffer_type*>(&buffer);
+    auto pbuffer = dynamic_cast<const buffer::Eigen<FloatType>*>(&buffer);
     return pbuffer != nullptr;
 }
@@ -67,39 +47,76 @@ typename EIGEN::const_eigen_buffer_reference EIGEN::rebind(
     throw std::runtime_error("Can not rebind buffer");
 }
 
-#define ALLOCATE_CONDITION(RANK) \
-    if(rank == RANK) return std::make_unique<Eigen<FloatType, RANK>>(rv)
+// -----------------------------------------------------------------------------
+// -- Protected methods
+// -----------------------------------------------------------------------------
+
+#define ALLOCATE(Rank)                                                    \
+    if(playout->rank() == Rank) {                                         \
+        using pimpl_type = buffer::detail_::EigenTensor<FloatType, Rank>; \
+        auto ppimpl =                                                     \
+          std::make_unique<pimpl_type>(playout->shape().as_smooth());     \
+        return std::make_unique<buffer_type>(                             \
+          std::move(ppimpl), std::move(playout), this->clone());          \
+    }
 
 TPARAMS
-typename EIGEN::base_pointer EIGEN::make_eigen_allocator(unsigned int rank,
-                                                         runtime_view_type rv) {
-    ALLOCATE_CONDITION(0);
-    else ALLOCATE_CONDITION(1);
-    else ALLOCATE_CONDITION(2);
-    else ALLOCATE_CONDITION(3);
-    else ALLOCATE_CONDITION(4);
-    else ALLOCATE_CONDITION(5);
-    else ALLOCATE_CONDITION(6);
-    else ALLOCATE_CONDITION(7);
-    else ALLOCATE_CONDITION(8);
-    else ALLOCATE_CONDITION(9);
-    else ALLOCATE_CONDITION(10);
-    throw std::runtime_error(
-      "Presently only support eigen tensors up to rank 10");
+typename EIGEN::buffer_base_pointer EIGEN::allocate_(layout_pointer playout) {
+    using buffer_type = buffer::Eigen<FloatType>;
+    ALLOCATE(0)
+    else ALLOCATE(1) else ALLOCATE(2) else ALLOCATE(3) else ALLOCATE(4) else ALLOCATE(5) else ALLOCATE(
+      6) else ALLOCATE(7) else ALLOCATE(8) else ALLOCATE(9) else ALLOCATE(10) else {
+        throw std::runtime_error("Tensors with rank > 10 not supported.");
+    }
 }
-#undef ALLOCATE_CONDITION
 
+TPARAMS
+typename EIGEN::contiguous_pointer EIGEN::construct_(rank0_il il) {
+    return il_construct_(il);
+}
 
-// -----------------------------------------------------------------------------
-// -- Protected methods
-// -----------------------------------------------------------------------------
+TPARAMS
+typename EIGEN::contiguous_pointer EIGEN::construct_(rank1_il il) {
+    return il_construct_(il);
+}
 
 TPARAMS
-typename EIGEN::buffer_base_pointer
EIGEN::allocate_(layout_pointer playout) { - auto pderived = detail_::dynamic_pointer_cast(playout); - if(pderived == nullptr) throw std::runtime_error("Unsupported layout"); +typename EIGEN::contiguous_pointer EIGEN::construct_(rank2_il il) { + return il_construct_(il); +} + +TPARAMS +typename EIGEN::contiguous_pointer EIGEN::construct_(rank3_il il) { + return il_construct_(il); +} - return allocate(std::move(pderived)); +TPARAMS +typename EIGEN::contiguous_pointer EIGEN::construct_(rank4_il il) { + return il_construct_(il); +} + +TPARAMS +typename EIGEN::contiguous_pointer EIGEN::construct_(layout_pointer playout, + element_type value) { + auto pbuffer = this->allocate(std::move(playout)); + auto& contig_buffer = static_cast&>(*pbuffer); + auto* pdata = contig_buffer.data(); + std::fill(pdata, pdata + contig_buffer.size(), value); + return pbuffer; +} + +// -- Private + +TPARAMS +template +typename EIGEN::contiguous_pointer EIGEN::il_construct_(ILType il) { + auto [extents, data] = unwrap_il(il); + shape::Smooth shape(extents.begin(), extents.end()); + auto playout = std::make_unique(std::move(shape)); + auto pbuffer = this->allocate(std::move(playout)); + auto& buffer_down = rebind(*pbuffer); + std::copy(data.begin(), data.end(), buffer_down.data()); + return pbuffer; } #undef EIGEN @@ -107,26 +124,9 @@ typename EIGEN::buffer_base_pointer EIGEN::allocate_(layout_pointer playout) { // -- Explicit class template instantiation -#define DEFINE_EIGEN_ALLOCATOR(TYPE) \ - template class Eigen; \ - template class Eigen; \ - template class Eigen; \ - template class Eigen; \ - template class Eigen; \ - template class Eigen; \ - template class Eigen; \ - template class Eigen; \ - template class Eigen; \ - template class Eigen; \ - template class Eigen - -DEFINE_EIGEN_ALLOCATOR(float); -DEFINE_EIGEN_ALLOCATOR(double); - -#ifdef ENABLE_SIGMA -DEFINE_EIGEN_ALLOCATOR(sigma::UFloat); -DEFINE_EIGEN_ALLOCATOR(sigma::UDouble); -#endif +#define DEFINE_EIGEN_ALLOCATOR(TYPE) template class Eigen + +TW_APPLY_FLOATING_POINT_TYPES(DEFINE_EIGEN_ALLOCATOR); #undef DEFINE_EIGEN_ALLOCATOR diff --git a/include/tensorwrapper/backends/backends.hpp b/src/tensorwrapper/backends/backends.hpp similarity index 100% rename from include/tensorwrapper/backends/backends.hpp rename to src/tensorwrapper/backends/backends.hpp diff --git a/include/tensorwrapper/backends/eigen.hpp b/src/tensorwrapper/backends/eigen.hpp similarity index 100% rename from include/tensorwrapper/backends/eigen.hpp rename to src/tensorwrapper/backends/eigen.hpp diff --git a/src/tensorwrapper/buffer/buffer_base.cpp b/src/tensorwrapper/buffer/buffer_base.cpp index dbc923e7..aa048530 100644 --- a/src/tensorwrapper/buffer/buffer_base.cpp +++ b/src/tensorwrapper/buffer/buffer_base.cpp @@ -20,50 +20,61 @@ namespace tensorwrapper::buffer { using dsl_reference = typename BufferBase::dsl_reference; -dsl_reference BufferBase::addition_assignment_(label_type this_labels, - const_labeled_reference lhs, - const_labeled_reference rhs) { - auto llayout = lhs.object().layout()(lhs.labels()); - auto rlayout = rhs.object().layout()(rhs.labels()); +template +dsl_reference BufferBase::binary_op_common_(FxnType&& fxn, + label_type this_labels, + const_labeled_reference lhs, + const_labeled_reference rhs) { + const auto& lbuffer = lhs.object(); + const auto& rbuffer = rhs.object(); + + auto llayout = lbuffer.layout()(lhs.labels()); + auto rlayout = rbuffer.layout()(rhs.labels()); - if(!has_layout()) m_layout_ = lhs.object().layout().clone(); + if(!has_layout()) m_layout_ = 
lbuffer.layout().clone_as(); + if(!has_allocator()) m_allocator_ = lbuffer.allocator().clone(); - m_layout_->addition_assignment(this_labels, llayout, rlayout); + fxn(m_layout_, this_labels, llayout, rlayout); return *this; } +dsl_reference BufferBase::addition_assignment_(label_type this_labels, + const_labeled_reference lhs, + const_labeled_reference rhs) { + auto lambda = [](auto&& obj, label_type this_labels, auto&& l, auto&& r) { + obj->addition_assignment(this_labels, l, r); + }; + + return binary_op_common_(lambda, std::move(this_labels), lhs, rhs); +} + dsl_reference BufferBase::subtraction_assignment_(label_type this_labels, const_labeled_reference lhs, const_labeled_reference rhs) { - auto llayout = lhs.object().layout()(lhs.labels()); - auto rlayout = rhs.object().layout()(rhs.labels()); - - if(!has_layout()) m_layout_ = lhs.object().layout().clone(); + auto lambda = [](auto&& obj, label_type this_labels, auto&& l, auto&& r) { + obj->subtraction_assignment(this_labels, l, r); + }; - m_layout_->subtraction_assignment(this_labels, llayout, rlayout); - - return *this; + return binary_op_common_(lambda, std::move(this_labels), lhs, rhs); } dsl_reference BufferBase::multiplication_assignment_( label_type this_labels, const_labeled_reference lhs, const_labeled_reference rhs) { - auto llayout = lhs.object().layout()(lhs.labels()); - auto rlayout = rhs.object().layout()(rhs.labels()); - - if(!has_layout()) m_layout_ = lhs.object().layout().clone(); + auto lambda = [](auto&& obj, label_type this_labels, auto&& l, auto&& r) { + obj->multiplication_assignment(this_labels, l, r); + }; - m_layout_->multiplication_assignment(this_labels, llayout, rlayout); - - return *this; + return binary_op_common_(lambda, std::move(this_labels), lhs, rhs); } dsl_reference BufferBase::permute_assignment_(label_type this_labels, const_labeled_reference rhs) { auto rlayout = rhs.object().layout()(rhs.labels()); - if(!has_layout()) m_layout_ = rhs.object().layout().clone(); + if(!has_layout()) m_layout_ = rhs.object().layout().clone_as(); + if(!has_allocator()) m_allocator_ = rhs.object().allocator().clone(); m_layout_->permute_assignment(this_labels, rlayout); diff --git a/src/tensorwrapper/buffer/detail_/eigen_pimpl.hpp b/src/tensorwrapper/buffer/detail_/eigen_pimpl.hpp new file mode 100644 index 00000000..d19107b6 --- /dev/null +++ b/src/tensorwrapper/buffer/detail_/eigen_pimpl.hpp @@ -0,0 +1,148 @@ +/* + * Copyright 2025 NWChemEx-Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once +#include "../../backends/eigen.hpp" +#include +#include + +namespace tensorwrapper::buffer::detail_ { + +/// Common API that type-erases Eigen's many tensor classes. 
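+///
+/// For example, a buffer::Eigen<float> object can hold any EigenTensor<float,
+/// Rank> behind an EigenPIMPL<float> pointer, so the rank appears nowhere in
+/// the buffer's static type and is fixed only when the allocator creates the
+/// PIMPL.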
+template<typename FloatType>
+class EigenPIMPL
+  : public tensorwrapper::detail_::PolymorphicBase<EigenPIMPL<FloatType>> {
+private:
+    using my_type = EigenPIMPL;
+    using polymorphic_base = tensorwrapper::detail_::PolymorphicBase<my_type>;
+
+public:
+    using parent_type = Eigen<FloatType>;
+    using pimpl_pointer = typename parent_type::pimpl_pointer;
+    using label_type = typename parent_type::label_type;
+    using reference = typename parent_type::reference;
+    using const_shape_reference = const shape::ShapeBase&;
+    using const_reference = typename parent_type::const_reference;
+    using pointer = typename parent_type::pointer;
+    using const_pointer = typename parent_type::const_pointer;
+    using string_type = typename polymorphic_base::string_type;
+    using index_vector = typename parent_type::index_vector;
+    using size_type = typename parent_type::size_type;
+
+    using const_pimpl_reference = const EigenPIMPL&;
+
+    using eigen_rank_type = unsigned int;
+
+    eigen_rank_type rank() const noexcept { return rank_(); }
+
+    size_type extent(eigen_rank_type i) const {
+        assert(i < rank());
+        return extent_(i);
+    }
+
+    pointer data() noexcept { return data_(); }
+
+    const_pointer data() const noexcept { return data_(); }
+
+    reference get_elem(index_vector index) {
+        assert(index.size() == rank());
+        return get_elem_(std::move(index));
+    }
+
+    const_reference get_elem(index_vector index) const {
+        assert(index.size() == rank());
+        return get_elem_(std::move(index));
+    }
+
+    void addition_assignment(label_type this_labels, label_type lhs_labels,
+                             label_type rhs_labels, const_pimpl_reference lhs,
+                             const_pimpl_reference rhs) {
+        addition_assignment_(std::move(this_labels), std::move(lhs_labels),
+                             std::move(rhs_labels), lhs, rhs);
+    }
+
+    void subtraction_assignment(label_type this_labels, label_type lhs_labels,
+                                label_type rhs_labels,
+                                const_pimpl_reference lhs,
+                                const_pimpl_reference rhs) {
+        subtraction_assignment_(std::move(this_labels), std::move(lhs_labels),
+                                std::move(rhs_labels), lhs, rhs);
+    }
+
+    void hadamard_assignment(label_type this_labels, label_type lhs_labels,
+                             label_type rhs_labels, const_pimpl_reference lhs,
+                             const_pimpl_reference rhs) {
+        hadamard_assignment_(std::move(this_labels), std::move(lhs_labels),
+                             std::move(rhs_labels), lhs, rhs);
+    }
+
+    void contraction_assignment(label_type this_labels, label_type lhs_labels,
+                                label_type rhs_labels,
+                                const_shape_reference result_shape,
+                                const_pimpl_reference lhs,
+                                const_pimpl_reference rhs) {
+        contraction_assignment_(std::move(this_labels), std::move(lhs_labels),
+                                std::move(rhs_labels), result_shape, lhs, rhs);
+    }
+
+    void permute_assignment(label_type this_labels, label_type rhs_labels,
+                            const_pimpl_reference rhs) {
+        permute_assignment_(std::move(this_labels), std::move(rhs_labels),
+                            rhs);
+    }
+
+    void scalar_multiplication(label_type this_labels, label_type rhs_labels,
+                               FloatType scalar, const_pimpl_reference rhs) {
+        scalar_multiplication_(std::move(this_labels), std::move(rhs_labels),
+                               scalar, rhs);
+    }
+
+protected:
+    virtual eigen_rank_type rank_() const noexcept = 0;
+    virtual size_type extent_(eigen_rank_type i) const = 0;
+    virtual pointer data_() noexcept = 0;
+    virtual const_pointer data_() const noexcept = 0;
+    virtual reference get_elem_(index_vector index) = 0;
+    virtual const_reference get_elem_(index_vector index) const = 0;
+    virtual void addition_assignment_(label_type this_labels,
+                                      label_type lhs_labels,
+                                      label_type rhs_labels,
+                                      const_pimpl_reference lhs,
+                                      const_pimpl_reference rhs) = 0;
+    virtual void subtraction_assignment_(label_type this_labels,
+                                         label_type lhs_labels,
+                                         label_type rhs_labels,
+                                         const_pimpl_reference lhs,
+                                         const_pimpl_reference rhs) = 0;
+    virtual void hadamard_assignment_(label_type this_labels,
+                                      label_type lhs_labels,
+                                      label_type rhs_labels,
+                                      const_pimpl_reference lhs,
+                                      const_pimpl_reference rhs) = 0;
+    virtual void contraction_assignment_(label_type this_labels,
+                                         label_type lhs_labels,
+                                         label_type rhs_labels,
+                                         const_shape_reference result_shape,
+                                         const_pimpl_reference lhs,
+                                         const_pimpl_reference rhs) = 0;
+    virtual void permute_assignment_(label_type this_labels,
+                                     label_type rhs_labels,
+                                     const_pimpl_reference rhs) = 0;
+    virtual void scalar_multiplication_(label_type this_labels,
+                                        label_type rhs_labels,
+                                        FloatType scalar,
+                                        const_pimpl_reference rhs) = 0;
+};
+
+} // namespace tensorwrapper::buffer::detail_
\ No newline at end of file
diff --git a/src/tensorwrapper/buffer/detail_/eigen_tensor.cpp b/src/tensorwrapper/buffer/detail_/eigen_tensor.cpp
new file mode 100644
index 00000000..dca92628
--- /dev/null
+++ b/src/tensorwrapper/buffer/detail_/eigen_tensor.cpp
@@ -0,0 +1,219 @@
+/*
+ * Copyright 2025 NWChemEx-Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "../../backends/eigen.hpp"
+#include "../contraction_planner.hpp"
+#include "eigen_tensor.hpp"
+
+namespace tensorwrapper::buffer::detail_ {
+
+#define TPARAMS template<typename FloatType, unsigned int Rank>
+#define EIGEN_TENSOR EigenTensor<FloatType, Rank>
+
+TPARAMS
+template<typename OperationType>
+void EIGEN_TENSOR::element_wise_op_(OperationType op, label_type this_labels,
+                                    label_type lhs_labels,
+                                    label_type rhs_labels,
+                                    const_pimpl_reference lhs,
+                                    const_pimpl_reference rhs) {
+    // Downcast LHS and RHS
+    const auto* lhs_down = dynamic_cast<const my_type*>(&lhs);
+    const auto& lhs_eigen = lhs_down->value();
+    const auto* rhs_down = dynamic_cast<const my_type*>(&rhs);
+    const auto& rhs_eigen = rhs_down->value();
+
+    // Whose indices match whose?
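+    // e.g., for this("i,j") = lhs("i,j") + rhs("j,i"), this_labels matches
+    // lhs_labels but not rhs_labels, so only the RHS is shuffled below.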
+    bool this_matches_lhs = (this_labels == lhs_labels);
+    bool this_matches_rhs = (this_labels == rhs_labels);
+    bool lhs_matches_rhs = (lhs_labels == rhs_labels);
+
+    // The three possible permutations we may need to apply
+    auto get_permutation = [](auto&& lhs_, auto&& rhs_) {
+        auto l_to_r = lhs_.permutation(rhs_);
+        return std::vector<int>(l_to_r.begin(), l_to_r.end());
+    };
+    auto r_to_l = get_permutation(rhs_labels, lhs_labels);
+    auto l_to_r = get_permutation(lhs_labels, rhs_labels);
+    auto this_to_r = get_permutation(this_labels, rhs_labels);
+
+    if(this_matches_lhs && this_matches_rhs) { // No permutations
+        m_tensor_ = op(lhs_eigen, rhs_eigen);
+    } else if(this_matches_lhs) { // RHS needs to be permuted
+        m_tensor_ = op(lhs_eigen, rhs_eigen.shuffle(r_to_l));
+    } else if(this_matches_rhs) { // LHS needs to be permuted
+        m_tensor_ = op(lhs_eigen.shuffle(l_to_r), rhs_eigen);
+    } else if(lhs_matches_rhs) { // This needs to be permuted
+        m_tensor_ = op(lhs_eigen, rhs_eigen).shuffle(this_to_r);
+    } else { // Everything needs to be permuted
+        m_tensor_ = op(lhs_eigen.shuffle(l_to_r), rhs_eigen).shuffle(this_to_r);
+    }
+}
+
+TPARAMS
+void EIGEN_TENSOR::addition_assignment_(label_type this_labels,
+                                        label_type lhs_labels,
+                                        label_type rhs_labels,
+                                        const_pimpl_reference lhs,
+                                        const_pimpl_reference rhs) {
+    auto lambda = [](auto&& lhs, auto&& rhs) { return lhs + rhs; };
+    element_wise_op_(lambda, std::move(this_labels), std::move(lhs_labels),
+                     std::move(rhs_labels), lhs, rhs);
+}
+
+template<typename TensorType>
+auto matrix_size(TensorType&& t, std::size_t row_ranks) {
+    std::size_t nrows = 1;
+    for(std::size_t i = 0; i < row_ranks; ++i) nrows *= t.extent(i);
+
+    std::size_t ncols = 1;
+    const auto rank = t.rank();
+    for(std::size_t i = row_ranks; i < rank; ++i) ncols *= t.extent(i);
+    return std::make_pair(nrows, ncols);
+}
+
+TPARAMS
+void EIGEN_TENSOR::contraction_assignment_(label_type olabels,
+                                           label_type llabels,
+                                           label_type rlabels,
+                                           const_shape_reference result_shape,
+                                           const_pimpl_reference lhs,
+                                           const_pimpl_reference rhs) {
+    ContractionPlanner plan(olabels, llabels, rlabels);
+
+    auto lt = lhs.clone();
+    auto rt = rhs.clone();
+    lt->permute_assignment(plan.lhs_permutation(), llabels, lhs);
+    rt->permute_assignment(plan.rhs_permutation(), rlabels, rhs);
+
+    const auto [lrows, lcols] = matrix_size(*lt, plan.lhs_free().size());
+    const auto [rrows, rcols] = matrix_size(*rt, plan.rhs_dummy().size());
+
+    // Work out the types of the matrix and a map
+    constexpr auto e_dyn = ::Eigen::Dynamic;
+    constexpr auto e_row_major = ::Eigen::RowMajor;
+    using matrix_t = ::Eigen::Matrix<FloatType, e_dyn, e_dyn, e_row_major>;
+    using map_t = ::Eigen::Map<matrix_t>;
+
+    eigen::data_type<FloatType, 2> buffer(lrows, rcols);
+
+    map_t lmatrix(lt->data(), lrows, lcols);
+    map_t rmatrix(rt->data(), rrows, rcols);
+    map_t omatrix(buffer.data(), lrows, rcols);
+    omatrix = lmatrix * rmatrix;
+
+    auto mlabels = plan.result_matrix_labels();
+    auto oshape = result_shape(olabels);
+
+    // oshape is the final shape; permute it to the shape omatrix is
+    // currently in
+    auto temp_shape = result_shape.clone();
+    temp_shape->permute_assignment(mlabels, oshape);
+    auto mshape = temp_shape->as_smooth();
+
+    // N.b. Eigen's definition of a permutation is the inverse of ours
+    auto m_to_o = olabels.permutation(mlabels);
+
+    std::array<long, Rank> out_size;
+    std::array<int, Rank> m_to_o_array;
+    for(std::size_t i = 0; i < Rank; ++i) {
+        out_size[i] = mshape.extent(i);
+        m_to_o_array[i] = m_to_o[i];
+    }
+
+    auto tensor = buffer.reshape(out_size);
+    if constexpr(Rank > 0) {
+        m_tensor_ = tensor.shuffle(m_to_o_array);
+    } else {
+        m_tensor_ = tensor;
+    }
+}
+
+TPARAMS
+void EIGEN_TENSOR::hadamard_assignment_(label_type this_labels,
+                                        label_type lhs_labels,
+                                        label_type rhs_labels,
+                                        const_pimpl_reference lhs,
+                                        const_pimpl_reference rhs) {
+    auto lambda = [](auto&& lhs, auto&& rhs) { return lhs * rhs; };
+    element_wise_op_(lambda, std::move(this_labels), std::move(lhs_labels),
+                     std::move(rhs_labels), lhs, rhs);
+}
+
+TPARAMS
+void EIGEN_TENSOR::permute_assignment_(label_type this_labels,
+                                       label_type rhs_labels,
+                                       const_pimpl_reference rhs) {
+    const auto* rhs_down = dynamic_cast<const my_type*>(&rhs);
+
+    if(this_labels != rhs_labels) { // RHS needs to be permuted first
+        // Eigen adopts the opposite definition of permutation from us.
+        auto r_to_l = this_labels.permutation(rhs_labels);
+        // Eigen wants int objects
+        std::vector<int> r_to_l2(r_to_l.begin(), r_to_l.end());
+        m_tensor_ = rhs_down->value().shuffle(r_to_l2);
+    } else {
+        m_tensor_ = rhs_down->value();
+    }
+}
+
+TPARAMS
+void EIGEN_TENSOR::scalar_multiplication_(label_type this_labels,
+                                          label_type rhs_labels,
+                                          FloatType scalar,
+                                          const_pimpl_reference rhs) {
+    const auto* rhs_downcasted = dynamic_cast<const my_type*>(&rhs);
+
+    if(this_labels != rhs_labels) { // RHS needs to be permuted first
+        auto r_to_l = rhs_labels.permutation(this_labels);
+        // Eigen wants int objects
+        std::vector<int> r_to_l2(r_to_l.begin(), r_to_l.end());
+        m_tensor_ = rhs_downcasted->value().shuffle(r_to_l2) * scalar;
+    } else {
+        m_tensor_ = rhs_downcasted->value() * scalar;
+    }
+}
+
+TPARAMS
+void EIGEN_TENSOR::subtraction_assignment_(label_type this_labels,
+                                           label_type lhs_labels,
+                                           label_type rhs_labels,
+                                           const_pimpl_reference lhs,
+                                           const_pimpl_reference rhs) {
+    auto lambda = [](auto&& lhs, auto&& rhs) { return lhs - rhs; };
+    element_wise_op_(lambda, std::move(this_labels), std::move(lhs_labels),
+                     std::move(rhs_labels), lhs, rhs);
+}
+
+#undef EIGEN_TENSOR
+#undef TPARAMS
+
+#define DEFINE_EIGEN_TENSOR(TYPE)        \
+    template class EigenTensor<TYPE, 0>; \
+    template class EigenTensor<TYPE, 1>; \
+    template class EigenTensor<TYPE, 2>; \
+    template class EigenTensor<TYPE, 3>; \
+    template class EigenTensor<TYPE, 4>; \
+    template class EigenTensor<TYPE, 5>; \
+    template class EigenTensor<TYPE, 6>; \
+    template class EigenTensor<TYPE, 7>; \
+    template class EigenTensor<TYPE, 8>; \
+    template class EigenTensor<TYPE, 9>; \
+    template class EigenTensor<TYPE, 10>
+
+TW_APPLY_FLOATING_POINT_TYPES(DEFINE_EIGEN_TENSOR);
+
+#undef DEFINE_EIGEN_TENSOR
+
+} // namespace tensorwrapper::buffer::detail_
\ No newline at end of file
diff --git a/src/tensorwrapper/buffer/detail_/eigen_tensor.hpp b/src/tensorwrapper/buffer/detail_/eigen_tensor.hpp
new file mode 100644
index 00000000..ba445ca3
--- /dev/null
+++ b/src/tensorwrapper/buffer/detail_/eigen_tensor.hpp
@@ -0,0 +1,180 @@
+/*
+ * Copyright 2025 NWChemEx-Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+#include "eigen_pimpl.hpp"
+#include <memory>
+#include <sstream>
+#include <utility>
+
+namespace tensorwrapper::buffer::detail_ {
+
+/// Implements EigenPIMPL by wrapping eigen::Tensor
+template<typename FloatType, unsigned int Rank>
+class EigenTensor : public EigenPIMPL<FloatType> {
+private:
+    using my_type = EigenTensor;
+    using base_type = EigenPIMPL<FloatType>;
+
+public:
+    using typename base_type::const_base_reference;
+    using typename base_type::const_pimpl_reference;
+    using typename base_type::const_pointer;
+    using typename base_type::const_reference;
+    using typename base_type::const_shape_reference;
+    using typename base_type::eigen_rank_type;
+    using typename base_type::index_vector;
+    using typename base_type::label_type;
+    using typename base_type::pimpl_pointer;
+    using typename base_type::pointer;
+    using typename base_type::reference;
+    using typename base_type::size_type;
+    using typename base_type::string_type;
+
+    using smooth_view = shape::SmoothView<shape::Smooth>;
+    using const_smooth_view = shape::SmoothView<const shape::Smooth>;
+    using const_smooth_view_reference = const const_smooth_view&;
+    using eigen_data_type = eigen::data_type<FloatType, Rank>;
+    using eigen_reference = eigen_data_type&;
+    using const_eigen_reference = const eigen_data_type&;
+
+    EigenTensor() = default;
+
+    explicit EigenTensor(const_smooth_view_reference shape) :
+      m_tensor_(allocate_from_shape_(shape,
+                                     std::make_index_sequence<Rank>())) {}
+
+    /// Get a mutable/read-only reference to the Eigen tensor object
+    ///@{
+    eigen_reference value() noexcept { return m_tensor_; }
+    const_eigen_reference value() const noexcept { return m_tensor_; }
+    ///@}
+
+    /// Tests for exact equality by taking the difference and comparing to 0.0
+    bool operator==(const my_type& rhs) const noexcept {
+        eigen::data_type<FloatType, 0> r = (m_tensor_ - rhs.m_tensor_).sum();
+        return r() == FloatType(0.0);
+    }
+
+protected:
+    pimpl_pointer clone_() const override {
+        return std::make_unique<my_type>(*this);
+    }
+
+    eigen_rank_type rank_() const noexcept override { return Rank; }
+
+    size_type extent_(eigen_rank_type i) const override {
+        return m_tensor_.dimension(i);
+    }
+
+    pointer data_() noexcept override { return m_tensor_.data(); }
+
+    const_pointer data_() const noexcept override { return m_tensor_.data(); }
+
+    reference get_elem_(index_vector index) override {
+        return unwrap_vector_(std::move(index),
+                              std::make_index_sequence<Rank>());
+    }
+    const_reference get_elem_(index_vector index) const override {
+        return unwrap_vector_(std::move(index),
+                              std::make_index_sequence<Rank>());
+    }
+
+    bool are_equal_(const_base_reference rhs) const noexcept override {
+        return base_type::template are_equal_impl_<my_type>(rhs);
+    }
+
+    string_type to_string_() const override {
+        std::stringstream ss;
+        ss << m_tensor_;
+        return ss.str();
+    }
+
+    void addition_assignment_(label_type this_labels, label_type lhs_labels,
+                              label_type rhs_labels, const_pimpl_reference lhs,
+                              const_pimpl_reference rhs) override;
+
+    void subtraction_assignment_(label_type this_labels, label_type lhs_labels,
+                                 label_type rhs_labels,
+                                 const_pimpl_reference lhs,
+                                 const_pimpl_reference rhs) override;
+
+    void hadamard_assignment_(label_type this_labels, label_type lhs_labels,
+                              label_type rhs_labels, const_pimpl_reference lhs,
+                              const_pimpl_reference rhs) override;
+
+    void contraction_assignment_(label_type this_labels, label_type lhs_labels,
+                                 label_type rhs_labels,
+                                 const_shape_reference result_shape,
+                                 const_pimpl_reference lhs,
+                                 const_pimpl_reference rhs) override;
+
+    void permute_assignment_(label_type this_labels, label_type rhs_labels,
+                             const_pimpl_reference rhs) override;
+
+    void scalar_multiplication_(label_type this_labels, label_type rhs_labels,
+                                FloatType scalar,
+                                const_pimpl_reference rhs) override;
+
+private:
+    // Code factorization for implementing element-wise operations
+    template<typename OperationType>
+    void element_wise_op_(OperationType op, label_type this_labels,
+                          label_type lhs_labels, label_type rhs_labels,
+                          const_pimpl_reference lhs, const_pimpl_reference rhs);
+
+    // Handles TMP needed to create an Eigen Tensor from a Smooth object
+    template<std::size_t... I>
+    auto allocate_from_shape_(const_smooth_view_reference shape,
+                              std::index_sequence<I...>) {
+        return eigen_data_type(shape.extent(I)...);
+    }
+
+    // Gets an element from the Eigen Tensor by unwrapping a std::vector
+    template<std::size_t... I>
+    reference unwrap_vector_(index_vector index, std::index_sequence<I...>) {
+        return m_tensor_(tensorwrapper::detail_::to_long(index.at(I))...);
+    }
+
+    // Same as mutable version, but result is read-only
+    template<std::size_t... I>
+    const_reference unwrap_vector_(index_vector index,
+                                   std::index_sequence<I...>) const {
+        return m_tensor_(tensorwrapper::detail_::to_long(index.at(I))...);
+    }
+
+    // The Eigen tensor *this wraps
+    eigen_data_type m_tensor_;
+};
+
+#define DECLARE_EIGEN_TENSOR(TYPE)              \
+    extern template class EigenTensor<TYPE, 0>; \
+    extern template class EigenTensor<TYPE, 1>; \
+    extern template class EigenTensor<TYPE, 2>; \
+    extern template class EigenTensor<TYPE, 3>; \
+    extern template class EigenTensor<TYPE, 4>; \
+    extern template class EigenTensor<TYPE, 5>; \
+    extern template class EigenTensor<TYPE, 6>; \
+    extern template class EigenTensor<TYPE, 7>; \
+    extern template class EigenTensor<TYPE, 8>; \
+    extern template class EigenTensor<TYPE, 9>; \
+    extern template class EigenTensor<TYPE, 10>
+
+TW_APPLY_FLOATING_POINT_TYPES(DECLARE_EIGEN_TENSOR);
+
+#undef DECLARE_EIGEN_TENSOR
+
+} // namespace tensorwrapper::buffer::detail_
\ No newline at end of file
diff --git a/src/tensorwrapper/buffer/eigen.cpp b/src/tensorwrapper/buffer/eigen.cpp
index 92ed1913..593898fc 100644
--- a/src/tensorwrapper/buffer/eigen.cpp
+++ b/src/tensorwrapper/buffer/eigen.cpp
@@ -13,8 +13,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
-#include "eigen_contraction.hpp"
+#include "detail_/eigen_tensor.hpp"
 #include
 #include
 #include
@@ -22,52 +21,73 @@
 namespace tensorwrapper::buffer {
 
-#define TPARAMS template<typename FloatType, unsigned short Rank>
-#define EIGEN Eigen<FloatType, Rank>
+#define TPARAMS template<typename FloatType>
+#define EIGEN Eigen<FloatType>
+
+// -- Public Methods
+
+TPARAMS
+EIGEN::Eigen() noexcept = default;
+
+TPARAMS
+EIGEN::Eigen(pimpl_pointer pimpl, layout_pointer playout,
+             allocator_base_pointer pallocator) :
+  my_base_type(std::move(playout), std::move(pallocator)),
+  m_pimpl_(std::move(pimpl)) {}
+
+TPARAMS
+EIGEN::Eigen(const Eigen& other) :
+  Eigen(other.has_pimpl_() ?
other.m_pimpl_->clone() : nullptr, other.layout(), + other.allocator()) {} + +TPARAMS +EIGEN::Eigen(Eigen&& other) noexcept = default; + +TPARAMS +EIGEN& EIGEN::operator=(const Eigen& rhs) { + if(this != &rhs) Eigen(rhs).swap(*this); + return *this; +} + +TPARAMS +EIGEN& EIGEN::operator=(Eigen&& rhs) noexcept = default; + +TPARAMS +EIGEN::~Eigen() noexcept = default; + +TPARAMS +void EIGEN::swap(Eigen& other) noexcept { m_pimpl_.swap(other.m_pimpl_); } + +TPARAMS +bool EIGEN::operator==(const Eigen& rhs) const noexcept { + if(has_pimpl_() != rhs.has_pimpl_()) return false; + if(!has_pimpl_()) return true; + return m_pimpl_->are_equal(*rhs.m_pimpl_); +} -using const_labeled_reference = - typename Eigen::const_labeled_reference; -using dsl_reference = typename Eigen::dsl_reference; +// -- Protected Methods + +TPARAMS +typename EIGEN::buffer_base_pointer EIGEN::clone_() const { + return std::make_unique(*this); +} + +TPARAMS +bool EIGEN::are_equal_(const_buffer_base_reference rhs) const noexcept { + return my_base_type::template are_equal_impl_(rhs); +} TPARAMS typename EIGEN::dsl_reference EIGEN::addition_assignment_( label_type this_labels, const_labeled_reference lhs, const_labeled_reference rhs) { BufferBase::addition_assignment_(this_labels, lhs, rhs); - - using allocator_type = allocator::Eigen; - const auto& lhs_downcasted = allocator_type::rebind(lhs.object()); - const auto& rhs_downcasted = allocator_type::rebind(rhs.object()); - const auto& lhs_eigen = lhs_downcasted.value(); - const auto& rhs_eigen = rhs_downcasted.value(); - - const auto& lhs_labels = lhs.labels(); - const auto& rhs_labels = rhs.labels(); - - bool this_matches_lhs = (this_labels == lhs_labels); - bool this_matches_rhs = (this_labels == rhs_labels); - bool lhs_matches_rhs = (lhs_labels == rhs_labels); - - auto get_permutation = [](auto&& lhs_, auto&& rhs_) { - auto l_to_r = lhs_.permutation(rhs_); - return std::vector(l_to_r.begin(), l_to_r.end()); - }; - - auto r_to_l = get_permutation(rhs_labels, lhs_labels); - auto l_to_r = get_permutation(lhs_labels, rhs_labels); - auto this_to_r = get_permutation(this_labels, rhs_labels); - - if(this_matches_lhs && this_matches_rhs) { // No permutations - m_tensor_ = lhs_eigen + rhs_eigen; - } else if(this_matches_lhs) { // RHS needs permuted - m_tensor_ = lhs_eigen + rhs_eigen.shuffle(r_to_l); - } else if(this_matches_rhs) { // LHS needs permuted - m_tensor_ = lhs_eigen.shuffle(l_to_r) + rhs_eigen; - } else if(lhs_matches_rhs) { // This needs permuted - m_tensor_ = (lhs_eigen + rhs_eigen).shuffle(this_to_r); - } else { // Everything needs permuted - m_tensor_ = (lhs_eigen.shuffle(l_to_r) + rhs_eigen).shuffle(this_to_r); - } + using alloc_type = allocator::Eigen; + const auto& lhs_down = alloc_type::rebind(lhs.object()); + const auto& rhs_down = alloc_type::rebind(rhs.object()); + if(!has_pimpl_()) m_pimpl_ = lhs_down.pimpl_().clone(); + pimpl_().addition_assignment(this_labels, lhs.labels(), rhs.labels(), + lhs_down.pimpl_(), rhs_down.pimpl_()); return *this; } @@ -77,41 +97,12 @@ typename EIGEN::dsl_reference EIGEN::subtraction_assignment_( label_type this_labels, const_labeled_reference lhs, const_labeled_reference rhs) { BufferBase::subtraction_assignment_(this_labels, lhs, rhs); - - using allocator_type = allocator::Eigen; - const auto& lhs_downcasted = allocator_type::rebind(lhs.object()); - const auto& rhs_downcasted = allocator_type::rebind(rhs.object()); - const auto& lhs_eigen = lhs_downcasted.value(); - const auto& rhs_eigen = rhs_downcasted.value(); - - const auto& 
lhs_labels = lhs.labels(); - const auto& rhs_labels = rhs.labels(); - - bool this_matches_lhs = (this_labels == lhs_labels); - bool this_matches_rhs = (this_labels == rhs_labels); - bool lhs_matches_rhs = (lhs_labels == rhs_labels); - - auto get_permutation = [](auto&& lhs_, auto&& rhs_) { - auto l_to_r = lhs_.permutation(rhs_); - return std::vector(l_to_r.begin(), l_to_r.end()); - }; - - auto r_to_l = get_permutation(rhs_labels, lhs_labels); - auto l_to_r = get_permutation(lhs_labels, rhs_labels); - auto this_to_r = get_permutation(this_labels, rhs_labels); - - if(this_matches_lhs && this_matches_rhs) { // No permutations - m_tensor_ = lhs_eigen - rhs_eigen; - } else if(this_matches_lhs) { // RHS needs permuted - m_tensor_ = lhs_eigen - rhs_eigen.shuffle(r_to_l); - } else if(this_matches_rhs) { // LHS needs permuted - m_tensor_ = lhs_eigen.shuffle(l_to_r) - rhs_eigen; - } else if(lhs_matches_rhs) { // This needs permuted - m_tensor_ = (lhs_eigen - rhs_eigen).shuffle(this_to_r); - } else { // Everything needs permuted - m_tensor_ = (lhs_eigen.shuffle(l_to_r) - rhs_eigen).shuffle(this_to_r); - } - + using alloc_type = allocator::Eigen; + const auto& lhs_down = alloc_type::rebind(lhs.object()); + const auto& rhs_down = alloc_type::rebind(rhs.object()); + if(!has_pimpl_()) m_pimpl_ = lhs_down.pimpl_().clone(); + pimpl_().subtraction_assignment(this_labels, lhs.labels(), rhs.labels(), + lhs_down.pimpl_(), rhs_down.pimpl_()); return *this; } @@ -121,33 +112,32 @@ typename EIGEN::dsl_reference EIGEN::multiplication_assignment_( const_labeled_reference rhs) { BufferBase::multiplication_assignment_(this_labels, lhs, rhs); + using alloc_type = allocator::Eigen; + const auto& lhs_down = alloc_type::rebind(lhs.object()); + const auto& rhs_down = alloc_type::rebind(rhs.object()); + + if(!has_pimpl_()) m_pimpl_ = lhs_down.pimpl_().clone(); if(this_labels.is_hadamard_product(lhs.labels(), rhs.labels())) - return hadamard_(this_labels, lhs, rhs); + pimpl_().hadamard_assignment(this_labels, lhs.labels(), rhs.labels(), + lhs_down.pimpl_(), rhs_down.pimpl_()); else if(this_labels.is_contraction(lhs.labels(), rhs.labels())) - return contraction_(this_labels, lhs, rhs); + pimpl_().contraction_assignment(this_labels, lhs.labels(), rhs.labels(), + this->layout().shape(), + lhs_down.pimpl_(), rhs_down.pimpl_()); else throw std::runtime_error("Mixed products NYI"); + + return *this; } TPARAMS typename EIGEN::dsl_reference EIGEN::permute_assignment_( label_type this_labels, const_labeled_reference rhs) { BufferBase::permute_assignment_(this_labels, rhs); - - using allocator_type = allocator::Eigen; - const auto& rhs_downcasted = allocator_type::rebind(rhs.object()); - - const auto& rlabels = rhs.labels(); - - if(this_labels != rlabels) { // We need to permute rhs before assignment - // Eigen adopts the opposite definition of permutation from us. 
- auto r_to_l = this_labels.permutation(rlabels); - // Eigen wants int objects - std::vector r_to_l2(r_to_l.begin(), r_to_l.end()); - m_tensor_ = rhs_downcasted.value().shuffle(r_to_l2); - } else { - m_tensor_ = rhs_downcasted.value(); - } + using alloc_type = allocator::Eigen; + const auto& rhs_down = alloc_type::rebind(rhs.object()); + if(!has_pimpl_()) m_pimpl_ = rhs_down.pimpl_().clone(); + pimpl_().permute_assignment(this_labels, rhs.labels(), rhs_down.pimpl_()); return *this; } @@ -156,103 +146,67 @@ TPARAMS typename EIGEN::dsl_reference EIGEN::scalar_multiplication_( label_type this_labels, double scalar, const_labeled_reference rhs) { BufferBase::permute_assignment_(this_labels, rhs); + using alloc_type = allocator::Eigen; + const auto& rhs_down = alloc_type::rebind(rhs.object()); + if(!has_pimpl_()) m_pimpl_ = rhs_down.pimpl_().clone(); + pimpl_().scalar_multiplication(this_labels, rhs.labels(), scalar, + rhs_down.pimpl_()); + return *this; +} - using allocator_type = allocator::Eigen; - const auto& rhs_downcasted = allocator_type::rebind(rhs.object()); - - const auto& rlabels = rhs.labels(); - - FloatType c(scalar); +TPARAMS +typename EIGEN::pointer EIGEN::data_() noexcept { + return m_pimpl_ ? m_pimpl_->data() : nullptr; +} - if(this_labels != rlabels) { // We need to permute rhs before assignment - auto r_to_l = rhs.labels().permutation(this_labels); - // Eigen wants int objects - std::vector r_to_l2(r_to_l.begin(), r_to_l.end()); - m_tensor_ = rhs_downcasted.value().shuffle(r_to_l2) * c; - } else { - m_tensor_ = rhs_downcasted.value() * c; - } +TPARAMS +typename EIGEN::const_pointer EIGEN::data_() const noexcept { + return m_pimpl_ ? m_pimpl_->data() : nullptr; +} - return *this; +TPARAMS +typename EIGEN::reference EIGEN::get_elem_(index_vector index) { + return pimpl_().get_elem(std::move(index)); } +TPARAMS +typename EIGEN::const_reference EIGEN::get_elem_(index_vector index) const { + return pimpl_().get_elem(std::move(index)); +} TPARAMS typename EIGEN::polymorphic_base::string_type EIGEN::to_string_() const { - std::stringstream ss; - ss << m_tensor_; - return ss.str(); + return m_pimpl_ ? 
m_pimpl_->to_string() : ""; } +// -- Private methods + TPARAMS -typename EIGEN::dsl_reference EIGEN::hadamard_(label_type this_labels, - const_labeled_reference lhs, - const_labeled_reference rhs) { - using allocator_type = allocator::Eigen; - const auto& lhs_downcasted = allocator_type::rebind(lhs.object()); - const auto& rhs_downcasted = allocator_type::rebind(rhs.object()); - const auto& lhs_eigen = lhs_downcasted.value(); - const auto& rhs_eigen = rhs_downcasted.value(); - - const auto& lhs_labels = lhs.labels(); - const auto& rhs_labels = rhs.labels(); - - bool this_matches_lhs = (this_labels == lhs_labels); - bool this_matches_rhs = (this_labels == rhs_labels); - bool lhs_matches_rhs = (lhs_labels == rhs_labels); - - auto get_permutation = [](auto&& lhs_, auto&& rhs_) { - auto l_to_r = lhs_.permutation(rhs_); - return std::vector(l_to_r.begin(), l_to_r.end()); - }; - - auto r_to_l = get_permutation(rhs_labels, lhs_labels); - auto l_to_r = get_permutation(lhs_labels, rhs_labels); - auto this_to_r = get_permutation(this_labels, rhs_labels); - - if(this_matches_lhs && this_matches_rhs) { // No permutations - m_tensor_ = lhs_eigen * rhs_eigen; - } else if(this_matches_lhs) { // RHS needs permuted - m_tensor_ = lhs_eigen * rhs_eigen.shuffle(r_to_l); - } else if(this_matches_rhs) { // LHS needs permuted - m_tensor_ = lhs_eigen.shuffle(l_to_r) * rhs_eigen; - } else if(lhs_matches_rhs) { // This needs permuted - m_tensor_ = (lhs_eigen * rhs_eigen).shuffle(this_to_r); - } else { // Everything needs permuted - m_tensor_ = (lhs_eigen.shuffle(l_to_r) * rhs_eigen).shuffle(this_to_r); - } +bool EIGEN::has_pimpl_() const noexcept { return static_cast(m_pimpl_); } - return *this; +TPARAMS +void EIGEN::assert_pimpl_() const { + if(has_pimpl_()) return; + throw std::runtime_error("buffer::Eigen has no PIMPL!"); } -TPARAMS typename EIGEN::dsl_reference EIGEN::contraction_( - label_type this_labels, const_labeled_reference lhs, - const_labeled_reference rhs) { - return eigen_contraction(*this, this_labels, lhs, rhs); +TPARAMS +typename EIGEN::pimpl_reference EIGEN::pimpl_() { + assert_pimpl_(); + return *m_pimpl_; +} + +TPARAMS +typename EIGEN::const_pimpl_reference EIGEN::pimpl_() const { + assert_pimpl_(); + return *m_pimpl_; } #undef EIGEN #undef TPARAMS -#define DEFINE_EIGEN_BUFFER(TYPE) \ - template class Eigen; \ - template class Eigen; \ - template class Eigen; \ - template class Eigen; \ - template class Eigen; \ - template class Eigen; \ - template class Eigen; \ - template class Eigen; \ - template class Eigen; \ - template class Eigen; \ - template class Eigen - -DEFINE_EIGEN_BUFFER(float); -DEFINE_EIGEN_BUFFER(double); - -#ifdef ENABLE_SIGMA -DEFINE_EIGEN_BUFFER(sigma::UFloat); -DEFINE_EIGEN_BUFFER(sigma::UDouble); -#endif +#define DEFINE_EIGEN_BUFFER(TYPE) template class Eigen + +TW_APPLY_FLOATING_POINT_TYPES(DEFINE_EIGEN_BUFFER); #undef DEFINE_EIGEN_BUFFER diff --git a/src/tensorwrapper/buffer/eigen_contraction.cpp b/src/tensorwrapper/buffer/eigen_contraction.cpp deleted file mode 100644 index 353d8868..00000000 --- a/src/tensorwrapper/buffer/eigen_contraction.cpp +++ /dev/null @@ -1,143 +0,0 @@ -/* - * Copyright 2025 NWChemEx-Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "contraction_planner.hpp" -#include "eigen_contraction.hpp" -#include -#include - -namespace tensorwrapper::buffer { - -using rank_type = unsigned short; -using return_type = BufferBase::dsl_reference; -using label_type = BufferBase::label_type; -using const_labeled_reference = BufferBase::const_labeled_reference; - -static constexpr unsigned int max_rank = 10; - -template -FloatType* get_data(TensorType& tensor) { - using allocator_type = allocator::Eigen; - if constexpr(Rank > max_rank) { - const auto sr = std::to_string(max_rank); - const auto msg = "Tensors with rank > " + sr + " are not supported"; - throw std::runtime_error(msg); - } else { - if(tensor.layout().rank() == Rank) { - return allocator_type::rebind(tensor).value().data(); - } else { - return get_data(tensor); - } - } -} - -template -auto matrix_size(TensorType&& t, std::size_t row_ranks) { - const auto shape = t.layout().shape().as_smooth(); - std::size_t nrows = 1; - for(std::size_t i = 0; i < row_ranks; ++i) nrows *= shape.extent(i); - - std::size_t ncols = 1; - const auto rank = shape.rank(); - for(std::size_t i = row_ranks; i < rank; ++i) ncols *= shape.extent(i); - return std::make_pair(nrows, ncols); -} - -template -return_type eigen_contraction(Eigen& result, - label_type olabels, const_labeled_reference lhs, - const_labeled_reference rhs) { - const auto& llabels = lhs.labels(); - const auto& lobject = lhs.object(); - const auto& rlabels = rhs.labels(); - const auto& robject = rhs.object(); - - ContractionPlanner plan(olabels, llabels, rlabels); - auto lt = lobject.clone(); - auto rt = robject.clone(); - lt->permute_assignment(plan.lhs_permutation(), lhs); - rt->permute_assignment(plan.rhs_permutation(), rhs); - const auto [lrows, lcols] = matrix_size(*lt, plan.lhs_free().size()); - const auto [rrows, rcols] = matrix_size(*rt, plan.rhs_dummy().size()); - - // Work out the types of the matrix amd a map - constexpr auto e_dyn = ::Eigen::Dynamic; - constexpr auto e_row_major = ::Eigen::RowMajor; - using matrix_t = ::Eigen::Matrix; - using map_t = ::Eigen::Map; - - typename Eigen::data_type buffer(lrows, rcols); - - map_t lmatrix(get_data(*lt), lrows, lcols); - map_t rmatrix(get_data(*rt), rrows, rcols); - map_t omatrix(buffer.data(), lrows, rcols); - omatrix = lmatrix * rmatrix; - - auto mlabels = plan.result_matrix_labels(); - auto oshape = result.layout().shape()(olabels); - - // oshapes is the final shape, permute it to shape omatrix is currently in - auto temp_shape = result.layout().shape().clone(); - temp_shape->permute_assignment(mlabels, oshape); - auto mshape = temp_shape->as_smooth(); - - auto m_to_o = olabels.permutation(mlabels); // N.b. 
Eigen def is inverse us - - std::array out_size; - std::array m_to_o_array; - for(std::size_t i = 0; i < Rank; ++i) { - out_size[i] = mshape.extent(i); - m_to_o_array[i] = m_to_o[i]; - } - - auto tensor = buffer.reshape(out_size); - if constexpr(Rank > 0) { - result.value() = tensor.shuffle(m_to_o_array); - } else { - result.value() = tensor; - } - return result; -} - -#define EIGEN_CONTRACTION_(FLOAT_TYPE, RANK) \ - template return_type eigen_contraction( \ - Eigen& result, label_type olabels, \ - const_labeled_reference lhs, const_labeled_reference rhs) - -#define EIGEN_CONTRACTION(FLOAT_TYPE) \ - EIGEN_CONTRACTION_(FLOAT_TYPE, 0); \ - EIGEN_CONTRACTION_(FLOAT_TYPE, 1); \ - EIGEN_CONTRACTION_(FLOAT_TYPE, 2); \ - EIGEN_CONTRACTION_(FLOAT_TYPE, 3); \ - EIGEN_CONTRACTION_(FLOAT_TYPE, 4); \ - EIGEN_CONTRACTION_(FLOAT_TYPE, 5); \ - EIGEN_CONTRACTION_(FLOAT_TYPE, 6); \ - EIGEN_CONTRACTION_(FLOAT_TYPE, 7); \ - EIGEN_CONTRACTION_(FLOAT_TYPE, 8); \ - EIGEN_CONTRACTION_(FLOAT_TYPE, 9); \ - EIGEN_CONTRACTION_(FLOAT_TYPE, 10) - -EIGEN_CONTRACTION(float); -EIGEN_CONTRACTION(double); - -#ifdef ENABLE_SIGMA -EIGEN_CONTRACTION(sigma::UFloat); -EIGEN_CONTRACTION(sigma::UDouble); -#endif - -#undef EIGEN_CONTRACTION -#undef EIGEN_CONTRACTION_ -} // namespace tensorwrapper::buffer \ No newline at end of file diff --git a/src/tensorwrapper/buffer/eigen_contraction.hpp b/src/tensorwrapper/buffer/eigen_contraction.hpp deleted file mode 100644 index 5b4c066b..00000000 --- a/src/tensorwrapper/buffer/eigen_contraction.hpp +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright 2025 NWChemEx-Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once -#include - -namespace tensorwrapper::buffer { - -/** @brief Relatively template-free API for Eigen tensor contraction. - * - * Eigen's tensor library relies on a heavy amount of template meta-programming - * to implement contract. TensorWrapper strives to do things at runtime. - * Ultimately, to have it both ways we need to create contraction dispatch - * instantiations for every combination of template parameters that Eigen may - * end up seeing, that's what the functions in this header do. - * - */ -template -BufferBase::dsl_reference eigen_contraction( - Eigen& result, BufferBase::label_type olabels, - BufferBase::const_labeled_reference lhs, - BufferBase::const_labeled_reference rhs); - -} // namespace tensorwrapper::buffer \ No newline at end of file diff --git a/src/tensorwrapper/layout/converter.hpp b/src/tensorwrapper/layout/converter.hpp new file mode 100644 index 00000000..af3c3f41 --- /dev/null +++ b/src/tensorwrapper/layout/converter.hpp @@ -0,0 +1,37 @@ +/* + * Copyright 2025 NWChemEx-Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once +#include +#include + +namespace tensorwrapper::layout { + +/** @brief Converts a logical layout into a physical layout. */ +class Converter { +public: + using logical_type = Logical; + using const_logical_reference = const logical_type&; + using physical_type = Physical; + using physical_pointer = std::unique_ptr; + + physical_pointer convert(const_logical_reference logical) { + return std::make_unique( + logical.shape(), logical.symmetry(), logical.sparsity()); + } +}; + +} // namespace tensorwrapper::layout \ No newline at end of file diff --git a/src/tensorwrapper/layout/layout_base.cpp b/src/tensorwrapper/layout/layout_base.cpp index 26fa41c0..3bc44a99 100644 --- a/src/tensorwrapper/layout/layout_base.cpp +++ b/src/tensorwrapper/layout/layout_base.cpp @@ -20,55 +20,63 @@ namespace tensorwrapper::layout { using dsl_reference = typename LayoutBase::dsl_reference; -dsl_reference LayoutBase::addition_assignment_(label_type this_labels, - const_labeled_reference lhs, - const_labeled_reference rhs) { +template +dsl_reference LayoutBase::binary_common_(FxnType&& fxn, label_type this_labels, + const_labeled_reference lhs, + const_labeled_reference rhs) { const auto& lobject = lhs.object(); const auto& llabels = lhs.labels(); const auto& robject = rhs.object(); const auto& rlabels = rhs.labels(); - m_shape_->addition_assignment(this_labels, lobject.shape()(llabels), - robject.shape()(rlabels)); - m_sparsity_->addition_assignment(this_labels, lobject.sparsity()(llabels), - robject.sparsity()(rlabels)); - m_symmetry_->addition_assignment(this_labels, lobject.symmetry()(llabels), - robject.symmetry()(rlabels)); + const auto& lshape = lobject.shape(); + const auto& rshape = robject.shape(); + + if(!m_shape_) m_shape_ = lshape.clone(); + fxn(*m_shape_, this_labels, lshape(llabels), rshape(rlabels)); + + const auto& lsymm = lobject.symmetry(); + const auto& rsymm = robject.symmetry(); + if(!m_symmetry_) m_symmetry_ = lsymm.clone(); + fxn(*m_symmetry_, this_labels, lsymm(llabels), rsymm(rlabels)); + + const auto& lsparsity = lobject.sparsity(); + const auto& rsparsity = robject.sparsity(); + if(!m_sparsity_) m_sparsity_ = lsparsity.clone(); + fxn(*m_sparsity_, this_labels, lsparsity(llabels), rsparsity(rlabels)); + return *this; } +dsl_reference LayoutBase::addition_assignment_(label_type this_labels, + const_labeled_reference lhs, + const_labeled_reference rhs) { + auto lambda = [](auto&& result, auto&& result_labels, auto&& labeled_lhs, + auto&& labeled_rhs) { + result.addition_assignment(result_labels, labeled_lhs, labeled_rhs); + }; + return binary_common_(lambda, this_labels, lhs, rhs); +} + dsl_reference LayoutBase::subtraction_assignment_(label_type this_labels, const_labeled_reference lhs, const_labeled_reference rhs) { - const auto& lobject = lhs.object(); - const auto& llabels = lhs.labels(); - const auto& robject = rhs.object(); - const auto& rlabels = rhs.labels(); - - m_shape_->subtraction_assignment(this_labels, lobject.shape()(llabels), - robject.shape()(rlabels)); - m_sparsity_->subtraction_assignment( - this_labels, lobject.sparsity()(llabels), 
robject.sparsity()(rlabels)); - m_symmetry_->subtraction_assignment( - this_labels, lobject.symmetry()(llabels), robject.symmetry()(rlabels)); - return *this; + auto lambda = [](auto&& result, auto&& result_labels, auto&& labeled_lhs, + auto&& labeled_rhs) { + result.subtraction_assignment(result_labels, labeled_lhs, labeled_rhs); + }; + return binary_common_(lambda, this_labels, lhs, rhs); } dsl_reference LayoutBase::multiplication_assignment_( label_type this_labels, const_labeled_reference lhs, const_labeled_reference rhs) { - const auto& lobject = lhs.object(); - const auto& llabels = lhs.labels(); - const auto& robject = rhs.object(); - const auto& rlabels = rhs.labels(); - - m_shape_->multiplication_assignment(this_labels, lobject.shape()(llabels), - robject.shape()(rlabels)); - m_sparsity_->multiplication_assignment( - this_labels, lobject.sparsity()(llabels), robject.sparsity()(rlabels)); - m_symmetry_->multiplication_assignment( - this_labels, lobject.symmetry()(llabels), robject.symmetry()(rlabels)); - return *this; + auto lambda = [](auto&& result, auto&& result_labels, auto&& labeled_lhs, + auto&& labeled_rhs) { + result.multiplication_assignment(result_labels, labeled_lhs, + labeled_rhs); + }; + return binary_common_(lambda, this_labels, lhs, rhs); } dsl_reference LayoutBase::permute_assignment_(label_type this_labels, diff --git a/src/tensorwrapper/operations/approximately_equal.cpp b/src/tensorwrapper/operations/approximately_equal.cpp new file mode 100644 index 00000000..0b662625 --- /dev/null +++ b/src/tensorwrapper/operations/approximately_equal.cpp @@ -0,0 +1,45 @@ +/* + * Copyright 2025 NWChemEx-Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +namespace tensorwrapper::operations { + +bool approximately_equal(const Tensor& lhs, const Tensor& rhs, double tol) { + using allocator_type = allocator::Eigen; + + if(lhs.rank() != rhs.rank()) return false; + + std::string index(lhs.rank() ? 
"i0" : ""); + for(std::size_t i = 1; i < lhs.rank(); ++i) + index += (",i" + std::to_string(i)); + Tensor result; + result(index) = lhs(index) - rhs(index); + + if(!allocator_type::can_rebind(result.buffer())) + throw std::runtime_error("Buffer is not filled with doubles"); + + auto& buffer_down = allocator_type::rebind(result.buffer()); + + for(std::size_t i = 0; i < buffer_down.size(); ++i) { + auto diff = *(buffer_down.data() + i); + if(std::fabs(diff) >= tol) return false; + } + return true; +} + +} // namespace tensorwrapper::operations \ No newline at end of file diff --git a/src/tensorwrapper/sparsity/pattern.cpp b/src/tensorwrapper/sparsity/pattern.cpp index c4bb9b4e..e625b5de 100644 --- a/src/tensorwrapper/sparsity/pattern.cpp +++ b/src/tensorwrapper/sparsity/pattern.cpp @@ -35,7 +35,7 @@ dsl_reference Pattern::subtraction_assignment_(label_type this_labels, dsl_reference Pattern::multiplication_assignment_(label_type this_labels, const_labeled_reference lhs, const_labeled_reference rhs) { - return permute_assignment_(this_labels, lhs); + return *this = Pattern(this_labels.size()); } dsl_reference Pattern::permute_assignment_(label_type this_labels, diff --git a/src/tensorwrapper/symmetry/group.cpp b/src/tensorwrapper/symmetry/group.cpp index a7f2f0ed..55a94d9b 100644 --- a/src/tensorwrapper/symmetry/group.cpp +++ b/src/tensorwrapper/symmetry/group.cpp @@ -47,7 +47,7 @@ dsl_reference Group::multiplication_assignment_(label_type this_labels, const_labeled_reference lhs, const_labeled_reference rhs) { assert_non_trivial(lhs, rhs); - return permute_assignment_(this_labels, lhs); + return *this = Group(this_labels.size()); } dsl_reference Group::permute_assignment_(label_type this_labels, diff --git a/src/tensorwrapper/tensor/detail_/il_utils.hpp b/src/tensorwrapper/tensor/detail_/il_utils.hpp index e8e3065e..66d9404a 100644 --- a/src/tensorwrapper/tensor/detail_/il_utils.hpp +++ b/src/tensorwrapper/tensor/detail_/il_utils.hpp @@ -24,8 +24,9 @@ namespace { /// Recursion end-point for unwrap_il -auto unwrap_il(double il) { - return std::make_tuple(std::deque{}, std::vector{il}); +template +auto unwrap_il(T il) { + return std::make_tuple(std::deque{}, std::vector{il}); } /** Unwraps a (possibly) recursive initializer list filled with doubles @@ -38,7 +39,7 @@ auto unwrap_il(std::initializer_list il) { using return_types = decltype(unwrap_il(std::declval())); std::optional> rv_dims; - std::vector rv_data; + std::tuple_element_t<1, return_types> rv_data; for(auto b = il.begin(); b != il.end(); ++b) { auto [dims, data] = unwrap_il(*b); diff --git a/src/tensorwrapper/tensor/detail_/tensor_factory.cpp b/src/tensorwrapper/tensor/detail_/tensor_factory.cpp index 5ec9efc7..d7f4c684 100644 --- a/src/tensorwrapper/tensor/detail_/tensor_factory.cpp +++ b/src/tensorwrapper/tensor/detail_/tensor_factory.cpp @@ -69,12 +69,7 @@ physical_layout_pointer TensorFactory::default_physical_layout( allocator_pointer TensorFactory::default_allocator( const_physical_reference physical, runtime_view_type rv) { - // For now, default allocator makes Eigen tensors filled with doubles - const auto rank = physical.shape().rank(); - - // N.B. 
all specializations implement make_eigen_allocator the same - using eigen_alloc = allocator::Eigen; - return eigen_alloc::make_eigen_allocator(rank, rv); + return std::make_unique>(rv); } bool TensorFactory::can_make_logical_layout(const input_type& input) noexcept { @@ -180,44 +175,33 @@ pimpl_pointer TensorFactory::construct(TensorInput input) { namespace { /// Wraps the process of turning an initializer list into a TensorInput object -template -auto il_to_input(T il, std::index_sequence) { - auto [dims, data] = unwrap_il(il); - - using buffer_type = buffer::Eigen; - using data_type = typename buffer_type::data_type; - - data_type eigen_tensor(dims[I]...); - auto pdata = eigen_tensor.data(); - for(decltype(data.size()) i = 0; i < data.size(); ++i) { - pdata[i] = data[i]; - } - shape::Smooth shape(dims.begin(), dims.end()); - layout::Physical l(shape); - return TensorInput(shape, buffer_type(eigen_tensor, l)); +template +auto il_to_input(T il, parallelzone::runtime::RuntimeView rv = {}) { + allocator::Eigen alloc(rv); + auto pbuffer = alloc.construct(il); + return TensorInput(pbuffer->layout().shape(), std::move(pbuffer)); } } // namespace pimpl_pointer TensorFactory::construct(scalar_il_type il) { - return construct(il_to_input(il, std::make_index_sequence<0>())); + return construct(il_to_input(il)); } pimpl_pointer TensorFactory::construct(vector_il_type il) { - return construct(il_to_input(il, std::make_index_sequence<1>())); - ; + return construct(il_to_input(il)); } pimpl_pointer TensorFactory::construct(matrix_il_type il) { - return construct(il_to_input(il, std::make_index_sequence<2>())); + return construct(il_to_input(il)); } pimpl_pointer TensorFactory::construct(tensor3_il_type il) { - return construct(il_to_input(il, std::make_index_sequence<3>())); + return construct(il_to_input(il)); } pimpl_pointer TensorFactory::construct(tensor4_il_type il) { - return construct(il_to_input(il, std::make_index_sequence<4>())); + return construct(il_to_input(il)); } } // namespace tensorwrapper::detail_ diff --git a/src/tensorwrapper/tensor/tensor_class.cpp b/src/tensorwrapper/tensor/tensor_class.cpp index f27c686e..0001b0db 100644 --- a/src/tensorwrapper/tensor/tensor_class.cpp +++ b/src/tensorwrapper/tensor/tensor_class.cpp @@ -14,6 +14,7 @@ * limitations under the License. 
*/ +#include "../layout/converter.hpp" #include "detail_/tensor_factory.hpp" #include "detail_/tensor_pimpl.hpp" #include @@ -92,10 +93,11 @@ bool Tensor::operator!=(const Tensor& rhs) const noexcept { } // -- Protected methods - -Tensor::dsl_reference Tensor::addition_assignment_( - label_type this_labels, const_labeled_reference lhs, - const_labeled_reference rhs) { +template +Tensor::dsl_reference Tensor::binary_common_(FxnType&& fxn, + label_type this_labels, + const_labeled_reference lhs, + const_labeled_reference rhs) { const auto& lobject = lhs.object(); const auto& llabels = lhs.labels(); const auto& robject = rhs.object(); @@ -103,15 +105,21 @@ Tensor::dsl_reference Tensor::addition_assignment_( auto llayout = lobject.logical_layout(); auto rlayout = robject.logical_layout(); - auto pthis_layout = llayout.clone_as(); + auto pthis_layout = std::make_unique(); + // auto pthis_layout = llayout.clone_as(); - pthis_layout->addition_assignment(this_labels, llayout(llabels), - rlayout(rlabels)); + fxn(*pthis_layout, this_labels, llayout(llabels), rlayout(rlabels)); - auto pthis_buffer = lobject.buffer().clone(); - auto lbuffer = lobject.buffer()(llabels); - auto rbuffer = robject.buffer()(rlabels); - pthis_buffer->addition_assignment(this_labels, lbuffer, rbuffer); + layout::Converter c; + auto pphys_layout = c.convert(*pthis_layout); + + const auto& lbuffer = lobject.buffer(); + const auto& rbuffer = robject.buffer(); + + auto palloc = lbuffer.allocator().clone(); + auto pthis_buffer = palloc->allocate(std::move(pphys_layout)); + + fxn(*pthis_buffer, this_labels, lbuffer(llabels), rbuffer(rlabels)); auto new_pimpl = std::make_unique(std::move(pthis_layout), std::move(pthis_buffer)); @@ -120,58 +128,35 @@ Tensor::dsl_reference Tensor::addition_assignment_( return *this; } -Tensor::dsl_reference Tensor::subtraction_assignment_( +Tensor::dsl_reference Tensor::addition_assignment_( label_type this_labels, const_labeled_reference lhs, const_labeled_reference rhs) { - const auto& lobject = lhs.object(); - const auto& llabels = lhs.labels(); - const auto& robject = rhs.object(); - const auto& rlabels = rhs.labels(); - - auto llayout = lobject.logical_layout(); - auto rlayout = robject.logical_layout(); - auto pthis_layout = llayout.clone_as(); - - pthis_layout->subtraction_assignment(this_labels, llayout(llabels), - rlayout(rlabels)); - - auto pthis_buffer = lobject.buffer().clone(); - auto lbuffer = lobject.buffer()(llabels); - auto rbuffer = robject.buffer()(rlabels); - pthis_buffer->subtraction_assignment(this_labels, lbuffer, rbuffer); - - auto new_pimpl = std::make_unique(std::move(pthis_layout), - std::move(pthis_buffer)); - new_pimpl.swap(m_pimpl_); + auto lambda = [](auto&& result, auto&& result_labels, auto&& labeled_lhs, + auto&& labeled_rhs) { + result.addition_assignment(result_labels, labeled_lhs, labeled_rhs); + }; + return binary_common_(lambda, this_labels, lhs, rhs); +} - return *this; +Tensor::dsl_reference Tensor::subtraction_assignment_( + label_type this_labels, const_labeled_reference lhs, + const_labeled_reference rhs) { + auto lambda = [](auto&& result, auto&& result_labels, auto&& labeled_lhs, + auto&& labeled_rhs) { + result.subtraction_assignment(result_labels, labeled_lhs, labeled_rhs); + }; + return binary_common_(lambda, this_labels, lhs, rhs); } Tensor::dsl_reference Tensor::multiplication_assignment_( label_type this_labels, const_labeled_reference lhs, const_labeled_reference rhs) { - const auto& lobject = lhs.object(); - const auto& llabels = lhs.labels(); - 
const auto& robject = rhs.object(); - const auto& rlabels = rhs.labels(); - - auto llayout = lobject.logical_layout(); - auto rlayout = robject.logical_layout(); - auto pthis_layout = llayout.clone_as(); - - pthis_layout->multiplication_assignment(this_labels, llayout(llabels), - rlayout(rlabels)); - - auto pthis_buffer = lobject.buffer().clone(); - auto lbuffer = lobject.buffer()(llabels); - auto rbuffer = robject.buffer()(rlabels); - pthis_buffer->multiplication_assignment(this_labels, lbuffer, rbuffer); - - auto new_pimpl = std::make_unique(std::move(pthis_layout), - std::move(pthis_buffer)); - new_pimpl.swap(m_pimpl_); - - return *this; + auto lambda = [](auto&& result, auto&& result_labels, auto&& labeled_lhs, + auto&& labeled_rhs) { + result.multiplication_assignment(result_labels, labeled_lhs, + labeled_rhs); + }; + return binary_common_(lambda, this_labels, lhs, rhs); } Tensor::dsl_reference Tensor::scalar_multiplication_( diff --git a/tests/cxx/acceptance_tests/tensorwrapper/acceptance_testing.hpp b/tests/cxx/acceptance_tests/tensorwrapper/acceptance_testing.hpp new file mode 100644 index 00000000..456683b3 --- /dev/null +++ b/tests/cxx/acceptance_tests/tensorwrapper/acceptance_testing.hpp @@ -0,0 +1,22 @@ +/* + * Copyright 2025 NWChemEx-Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once +#include +#include +#include +#include +#include \ No newline at end of file diff --git a/tests/cxx/acceptance_tests/tensorwrapper/contractions.cpp b/tests/cxx/acceptance_tests/tensorwrapper/contractions.cpp new file mode 100644 index 00000000..a792b8a0 --- /dev/null +++ b/tests/cxx/acceptance_tests/tensorwrapper/contractions.cpp @@ -0,0 +1,60 @@ +/* + * Copyright 2025 NWChemEx-Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "acceptance_testing.hpp" + +using namespace tensorwrapper; +using namespace operations; +TEST_CASE("Contractions") { + Tensor defaulted; + + Tensor scalar_0(1.23); + Tensor scalar_1(2.34); + + Tensor vector_0{1.23, 2.34, 3.45}; + Tensor vector_1{4.56, 5.67, 6.78}; + + Tensor matrix_0{{1.23, 2.34}, {3.45, 4.56}}; + Tensor matrix_1{{5.67, 6.78}, {7.89, 8.90}}; + + Tensor tensor3_0{{{1.1, 2.2}, {3.3, 4.4}}, {{5.5, 6.6}, {7.7, 8.8}}}; + Tensor tensor3_1{{{9.9, 10.10}, {11.11, 12.12}}, + {{13.13, 14.14}, {15.15, 16.16}}}; + + Tensor tensor4_0{ + {{{1.1, 2.2}, {3.3, 4.4}}, {{5.5, 6.6}, {7.7, 8.8}}}, + {{{9.9, 10.10}, {11.11, 12.12}}, {{13.13, 14.14}, {15.15, 16.16}}}}; + Tensor tensor4_1{ + {{{17.17, 18.18}, {19.19, 20.20}}, {{21.21, 22.22}, {23.23, 24.24}}}, + {{{25.25, 26.26}, {27.27, 28.28}}, {{29.29, 30.30}, {31.31, 32.32}}}}; + + SECTION("LHS == matrix") { + SECTION("RHS == tensor3") { + SECTION("ij,jkl->ikl") { + defaulted("i,k,l") = matrix_0("i,j") * tensor3_0("j,k,l"); + Tensor corr{ + {{14.222999999999999, 18.15}, {22.076999999999998, 26.004}}, + {{28.875, 37.686}, {46.497, 55.308}}}; + REQUIRE(approximately_equal(defaulted, corr, 1e-10)); + } + SECTION("ij,ijk->k") { + defaulted("k") = matrix_0("i,j") * tensor3_0("i,j,k"); + Tensor corr{63.162, 75.89999999999999}; + REQUIRE(approximately_equal(defaulted, corr, 1e-10)); + } + } + } +} \ No newline at end of file diff --git a/tests/cxx/acceptance_tests/tensorwrapper/contractions.py b/tests/cxx/acceptance_tests/tensorwrapper/contractions.py new file mode 100644 index 00000000..1d1f4a86 --- /dev/null +++ b/tests/cxx/acceptance_tests/tensorwrapper/contractions.py @@ -0,0 +1,45 @@ +# Copyright 2025 NWChemEx-Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
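+# +# This script regenerates the hard-coded `corr` literals used by +# contractions.cpp above: print_corr (defined below) rewrites a NumPy +# einsum result as a C++ initializer list. To cover a new contraction, +# add one call at the bottom; e.g. (a sketch, this case is not currently +# exercised by the C++ test): +# +# print_corr(np.einsum('ij,jk->ik', matrix_0, matrix_1)) +# +# then paste the printed "Tensor corr {...};" line into a new SECTION. 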
+ +import numpy as np + + +def print_corr(t): + il = str(t.tolist()).replace('[', '{').replace(']', '}') + print('Tensor corr {};'.format(il)) + + +scalar_0 = np.array(1.23) +scalar_1 = np.array(2.34) + +vector_0 = np.array([1.23, 2.34, 3.45]) +vector_1 = np.array([4.56, 5.67, 6.78]) + +matrix_0 = np.array([[1.23, 2.34], [3.45, 4.56]]) +matrix_1 = np.array([[5.67, 6.78], [7.89, 8.90]]) + +tensor3_0 = np.array([[[1.1, 2.2], [3.3, 4.4]], [[5.5, 6.6], [7.7, 8.8]]]) +tensor3_1 = np.array([[[9.9, 10.10], [11.11, 12.12]], + [[13.13, 14.14], [15.15, 16.16]]]) + +tensor4_0 = np.array([[[[1.1, 2.2], [3.3, 4.4]], [[5.5, 6.6], [7.7, 8.8]]], + [[[9.9, 10.10], [11.11, 12.12]], + [[13.13, 14.14], [15.15, 16.16]]]]) +tensor4_1 = np.array([[[[17.17, 18.18], [19.19, 20.20]], + [[21.21, 22.22], [23.23, 24.24]]], + [[[25.25, 26.26], [27.27, 28.28]], + [[29.29, 30.30], [31.31, 32.32]]]]) + +print_corr(np.einsum('ij,jkl->ikl', matrix_0, tensor3_0)) +print_corr(np.einsum('ij,ijk->k', matrix_0, tensor3_0)) diff --git a/tests/cxx/acceptance_tests/tensorwrapper/test_main.cpp b/tests/cxx/acceptance_tests/tensorwrapper/test_main.cpp new file mode 100644 index 00000000..030cf950 --- /dev/null +++ b/tests/cxx/acceptance_tests/tensorwrapper/test_main.cpp @@ -0,0 +1,27 @@ +/* + * Copyright 2024 NWChemEx Community + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#define CATCH_CONFIG_RUNNER +#include +#include + +int main(int argc, char* argv[]) { + auto rt = parallelzone::runtime::RuntimeView(argc, argv); + + int res = Catch::Session().run(argc, argv); + + return res; +} diff --git a/tests/cxx/unit_tests/tensorwrapper/allocator/contiguous.cpp b/tests/cxx/unit_tests/tensorwrapper/allocator/contiguous.cpp new file mode 100644 index 00000000..cdf70f29 --- /dev/null +++ b/tests/cxx/unit_tests/tensorwrapper/allocator/contiguous.cpp @@ -0,0 +1,101 @@ +/* + * Copyright 2025 NWChemEx-Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "../testing/testing.hpp" +#include <tensorwrapper/allocator/contiguous.hpp> + +using namespace tensorwrapper; + +TEMPLATE_LIST_TEST_CASE("allocator::Contiguous", "", + types::floating_point_types) { + using allocator_type = allocator::Contiguous<TestType>; + using layout_type = typename allocator_type::layout_type; + + auto alloc = testing::make_allocator(); + + auto scalar_corr = testing::eigen_scalar<TestType>(); + auto vector_corr = testing::eigen_vector<TestType>(); + auto matrix_corr = testing::eigen_matrix<TestType>(); + + SECTION("allocate(layout)") { + auto pscalar = alloc.allocate(scalar_corr->layout()); + pscalar->at() = 42.0; + REQUIRE(pscalar->are_equal(*scalar_corr)); + } + + SECTION("allocate(layout*)") { + auto pvector = alloc.allocate( + vector_corr->layout().template clone_as<layout_type>()); + pvector->at(0) = 0.0; + pvector->at(1) = 1.0; + pvector->at(2) = 2.0; + pvector->at(3) = 3.0; + pvector->at(4) = 4.0; + + REQUIRE(pvector->are_equal(*vector_corr)); + } + + SECTION("construct(scalar)") { + auto pscalar = alloc.construct(42.0); + REQUIRE(pscalar->are_equal(*scalar_corr)); + } + + SECTION("construct(vector)") { + auto pvector = alloc.construct({0.0, 1.0, 2.0, 3.0, 4.0}); + REQUIRE(pvector->are_equal(*vector_corr)); + } + + SECTION("construct(matrix)") { + typename allocator_type::rank2_il il{{1.0, 2.0}, {3.0, 4.0}}; + auto pmatrix = alloc.construct(il); + REQUIRE(pmatrix->are_equal(*matrix_corr)); + } + + SECTION("construct(tensor3)") { + typename allocator_type::rank3_il il{{{1.0, 2.0}, {3.0, 4.0}}, + {{5.0, 6.0}, {7.0, 8.0}}}; + auto ptensor3 = alloc.construct(il); + REQUIRE(ptensor3->are_equal(*testing::eigen_tensor3<TestType>())); + } + + SECTION("construct(tensor4)") { + typename allocator_type::rank4_il il{ + {{{1.0, 2.0}, {3.0, 4.0}}, {{5.0, 6.0}, {7.0, 8.0}}}, + {{{9.0, 10.0}, {11.0, 12.0}}, {{13.0, 14.0}, {15.0, 16.0}}}}; + auto ptensor4 = alloc.construct(il); + REQUIRE(ptensor4->are_equal(*testing::eigen_tensor4<TestType>())); + } + + SECTION("construct(layout, value)") { + auto pmatrix = alloc.construct(matrix_corr->layout(), 0.0); + matrix_corr->at(0, 0) = 0.0; + matrix_corr->at(0, 1) = 0.0; + matrix_corr->at(1, 0) = 0.0; + matrix_corr->at(1, 1) = 0.0; + + REQUIRE(pmatrix->are_equal(*matrix_corr)); + } + + SECTION("construct(layout*, value)") { + auto pmatrix = alloc.construct( + matrix_corr->layout().template clone_as<layout_type>(), 0.0); + matrix_corr->at(0, 0) = 0.0; + matrix_corr->at(0, 1) = 0.0; + matrix_corr->at(1, 0) = 0.0; + matrix_corr->at(1, 1) = 0.0; + + REQUIRE(pmatrix->are_equal(*matrix_corr)); + } +} \ No newline at end of file diff --git a/tests/cxx/unit_tests/tensorwrapper/allocator/eigen.cpp b/tests/cxx/unit_tests/tensorwrapper/allocator/eigen.cpp index a0eab4c7..948fce69 100644 --- a/tests/cxx/unit_tests/tensorwrapper/allocator/eigen.cpp +++ b/tests/cxx/unit_tests/tensorwrapper/allocator/eigen.cpp @@ -22,147 +22,107 @@ using namespace tensorwrapper; -#ifdef ENABLE_SIGMA -using types2test = std::tuple<float, double, sigma::UFloat, sigma::UDouble>; -#else -using types2test = std::tuple<float, double>; -#endif +using types2test = types::floating_point_types; TEMPLATE_LIST_TEST_CASE("EigenAllocator", "", types2test) { - using scalar_alloc_type = allocator::Eigen<TestType, 0>; - using vector_alloc_type = allocator::Eigen<TestType, 1>; - using matrix_alloc_type = allocator::Eigen<TestType, 2>; - using eigen_buffer_scalar = typename scalar_alloc_type::eigen_buffer_type; - using eigen_buffer_vector = typename vector_alloc_type::eigen_buffer_type; - using eigen_buffer_matrix = typename matrix_alloc_type::eigen_buffer_type; - using eigen_scalar = typename eigen_buffer_scalar::data_type; - using eigen_vector = typename eigen_buffer_vector::data_type; - using eigen_matrix = 
typename eigen_buffer_matrix::data_type; + using alloc_type = allocator::Eigen<TestType>; parallelzone::runtime::RuntimeView rv; - auto scalar_layout = testing::scalar_physical(); auto vector_layout = testing::vector_physical(2); auto matrix_layout = testing::matrix_physical(2, 2); using layout_type = decltype(scalar_layout); - scalar_alloc_type scalar_alloc(rv); - vector_alloc_type vector_alloc(rv); - matrix_alloc_type matrix_alloc(rv); + auto pscalar_corr = testing::eigen_scalar<TestType>(); + auto& scalar_corr = *pscalar_corr; + scalar_corr.at() = 0.0; - eigen_scalar scalar; - scalar() = 0.0; - eigen_buffer_scalar scalar_corr(scalar, scalar_layout); + auto pvector_corr = testing::eigen_vector<TestType>(2); + auto& vector_corr = *pvector_corr; + vector_corr.at(0) = 1; + vector_corr.at(1) = 1; - eigen_vector vector(2); - vector.setConstant(1); - eigen_buffer_vector vector_corr(vector, vector_layout); + auto pmatrix_corr = testing::eigen_matrix<TestType>(2, 2); + auto& matrix_corr = *pmatrix_corr; + matrix_corr.at(0, 0) = 2; + matrix_corr.at(0, 1) = 2; + matrix_corr.at(1, 0) = 2; + matrix_corr.at(1, 1) = 2; - eigen_matrix matrix(2, 2); - matrix.setConstant(2); - eigen_buffer_matrix matrix_corr(matrix, matrix_layout); + alloc_type alloc(rv); SECTION("Ctor") { - SECTION("runtime") { - REQUIRE(scalar_alloc.runtime() == rv); - REQUIRE(vector_alloc.runtime() == rv); - REQUIRE(matrix_alloc.runtime() == rv); - } - - testing::test_copy_and_move_ctors(scalar_alloc, vector_alloc, - matrix_alloc); + SECTION("runtime") { REQUIRE(alloc.runtime() == rv); } + testing::test_copy_and_move_ctors(alloc); } - SECTION("allocate(MonoTile)") { + SECTION("allocate(Layout)") { // N.b. allocate doesn't initialize tensor, so only compare layouts - auto pscalar = scalar_alloc.allocate(scalar_layout); + auto pscalar = alloc.allocate(scalar_layout); REQUIRE(pscalar->layout().are_equal(scalar_layout)); - auto pvector = vector_alloc.allocate(vector_layout); + auto pvector = alloc.allocate(vector_layout); REQUIRE(pvector->layout().are_equal(vector_layout)); - auto pmatrix = matrix_alloc.allocate(matrix_layout); + auto pmatrix = alloc.allocate(matrix_layout); REQUIRE(pmatrix->layout().are_equal(matrix_layout)); - // Throws if ranks don't match - using except_t = std::runtime_error; - REQUIRE_THROWS_AS(scalar_alloc.allocate(vector_layout), except_t); + // Works if ranks don't match + pvector = alloc.allocate(vector_layout); + REQUIRE(pvector->layout().are_equal(vector_layout)); } - SECTION("allocate(std::unique_ptr)") { + SECTION("allocate(std::unique_ptr)") { // N.b. allocate doesn't initialize tensor, so only compare layouts auto pscalar_layout = std::make_unique<layout_type>(scalar_layout); - auto pscalar = scalar_alloc.allocate(std::move(pscalar_layout)); + auto pscalar = alloc.allocate(std::move(pscalar_layout)); REQUIRE(pscalar->layout().are_equal(scalar_layout)); auto pvector_layout = std::make_unique<layout_type>(vector_layout); - auto pvector = vector_alloc.allocate(std::move(pvector_layout)); + auto pvector = alloc.allocate(std::move(pvector_layout)); REQUIRE(pvector->layout().are_equal(vector_layout)); auto pmatrix_layout = std::make_unique<layout_type>(matrix_layout); - auto pmatrix = matrix_alloc.allocate(std::move(pmatrix_layout)); + auto pmatrix = alloc.allocate(std::move(pmatrix_layout)); REQUIRE(pmatrix->layout().are_equal(matrix_layout)); - - // Throws if ranks don't match - using except_t = std::runtime_error; - auto pvector_layout2 = std::make_unique<layout_type>(vector_layout); - REQUIRE_THROWS_AS(scalar_alloc.allocate(std::move(pvector_layout2)), - except_t); } SECTION("construct(value)") { - auto pscalar = scalar_alloc.construct(scalar_layout, 0); + auto pscalar = alloc.construct(scalar_layout, 0); REQUIRE(*pscalar == scalar_corr); - auto pvector = vector_alloc.construct(vector_layout, 1); + auto pvector = alloc.construct(vector_layout, 1); REQUIRE(*pvector == vector_corr); auto pmatrix_layout = std::make_unique<layout_type>(matrix_layout); - auto pmatrix = matrix_alloc.construct(std::move(pmatrix_layout), 2); + auto pmatrix = alloc.construct(std::move(pmatrix_layout), 2); REQUIRE(*pmatrix == matrix_corr); - - // Throws if ranks don't match - using except_t = std::runtime_error; - REQUIRE_THROWS_AS(scalar_alloc.allocate(vector_layout), except_t); } - SECTION("can_rebind") { - REQUIRE(scalar_alloc.can_rebind(scalar_corr)); - REQUIRE_FALSE(scalar_alloc.can_rebind(vector_corr)); - } + SECTION("can_rebind") { REQUIRE(alloc.can_rebind(scalar_corr)); } SECTION("rebind(non-const)") { - using type = typename scalar_alloc_type::buffer_base_reference; + using type = typename alloc_type::buffer_base_reference; type scalar_base = scalar_corr; - auto& eigen_buffer = scalar_alloc.rebind(scalar_base); + auto& eigen_buffer = alloc.rebind(scalar_base); REQUIRE(&eigen_buffer == &scalar_corr); - REQUIRE_THROWS_AS(scalar_alloc.rebind(vector_corr), std::runtime_error); } SECTION("rebind(const)") { - using type = typename scalar_alloc_type::const_buffer_base_reference; + using type = typename alloc_type::const_buffer_base_reference; type scalar_base = scalar_corr; - auto& eigen_buffer = scalar_alloc.rebind(scalar_base); + auto& eigen_buffer = alloc.rebind(scalar_base); REQUIRE(&eigen_buffer == &scalar_corr); - - type vector_base = vector_corr; - REQUIRE_THROWS_AS(scalar_alloc.rebind(vector_base), std::runtime_error); } - SECTION("operator==") { - REQUIRE(scalar_alloc == scalar_alloc_type(rv)); - REQUIRE_FALSE(scalar_alloc == vector_alloc); - } + SECTION("operator==") { REQUIRE(alloc == alloc_type(rv)); } SECTION("virtual_methods") { SECTION("clone") { - auto pscalar = scalar_alloc.clone(); - REQUIRE(pscalar->are_equal(scalar_alloc)); + auto pscalar = alloc.clone(); + REQUIRE(pscalar->are_equal(alloc)); } - SECTION("are_equal") { - REQUIRE(scalar_alloc.are_equal(scalar_alloc_type(rv))); - REQUIRE_FALSE(scalar_alloc.are_equal(vector_alloc)); - } + SECTION("are_equal") { REQUIRE(alloc.are_equal(alloc_type(rv))); } } } diff --git a/tests/cxx/unit_tests/tensorwrapper/buffer/buffer_base.cpp b/tests/cxx/unit_tests/tensorwrapper/buffer/buffer_base.cpp index e5928b61..81ff984b 100644 --- 
a/tests/cxx/unit_tests/tensorwrapper/buffer/buffer_base.cpp +++ b/tests/cxx/unit_tests/tensorwrapper/buffer/buffer_base.cpp @@ -34,23 +34,20 @@ using namespace buffer; */ TEST_CASE("BufferBase") { - using scalar_buffer = buffer::Eigen; - using vector_buffer = buffer::Eigen; + auto pscalar = testing::eigen_scalar(); + auto& scalar = *pscalar; + scalar.at() = 1.0; - typename scalar_buffer::data_type eigen_scalar; - eigen_scalar() = 1.0; + auto pvector = testing::eigen_vector(2); + auto& vector = *pvector; - typename vector_buffer::data_type eigen_vector(2); - eigen_vector(0) = 1.0; - eigen_vector(1) = 2.0; + vector.at(0) = 1.0; + vector.at(1) = 2.0; auto scalar_layout = testing::scalar_physical(); auto vector_layout = testing::vector_physical(2); - vector_buffer defaulted; - scalar_buffer scalar(eigen_scalar, scalar_layout); - vector_buffer vector(eigen_vector, vector_layout); - + buffer::Eigen defaulted; BufferBase& defaulted_base = defaulted; BufferBase& scalar_base = scalar; BufferBase& vector_base = vector; @@ -61,22 +58,30 @@ TEST_CASE("BufferBase") { REQUIRE(vector_base.has_layout()); } + SECTION("has_allocator") { REQUIRE_FALSE(defaulted_base.has_allocator()); } + SECTION("layout") { REQUIRE_THROWS_AS(defaulted_base.layout(), std::runtime_error); REQUIRE(scalar_base.layout().are_equal(scalar_layout)); REQUIRE(vector_base.layout().are_equal(vector_layout)); } + SECTION("allocator()") { + REQUIRE_THROWS_AS(defaulted_base.allocator(), std::runtime_error); + } + + SECTION("allocator() const") { + REQUIRE_THROWS_AS(std::as_const(defaulted_base).allocator(), + std::runtime_error); + } + SECTION("operator==") { // Defaulted layout == defaulted layout - REQUIRE(defaulted_base == scalar_buffer()); + REQUIRE(defaulted_base == buffer::Eigen{}); // Defaulted layout != non-defaulted layout REQUIRE_FALSE(defaulted_base == scalar_base); - // Non-defaulted layout same value - REQUIRE(scalar_base == scalar_buffer(eigen_scalar, scalar_layout)); - // Non-defaulted layout different value REQUIRE_FALSE(scalar_base == vector_base); } @@ -84,6 +89,6 @@ TEST_CASE("BufferBase") { SECTION("operator!=") { // Just spot check because it negates operator==, which was tested REQUIRE(defaulted_base != scalar_base); - REQUIRE_FALSE(defaulted_base != scalar_buffer()); + REQUIRE_FALSE(defaulted_base != buffer::Eigen()); } } diff --git a/tests/cxx/unit_tests/tensorwrapper/buffer/contiguous.cpp b/tests/cxx/unit_tests/tensorwrapper/buffer/contiguous.cpp index 6c74f741..e3999e87 100644 --- a/tests/cxx/unit_tests/tensorwrapper/buffer/contiguous.cpp +++ b/tests/cxx/unit_tests/tensorwrapper/buffer/contiguous.cpp @@ -31,10 +31,12 @@ using namespace buffer; * */ -TEMPLATE_LIST_TEST_CASE("Contiguous", "", testing::floating_point_types) { +TEMPLATE_LIST_TEST_CASE("buffer::Contiguous", "", types::floating_point_types) { using base_type = Contiguous; - auto t0 = testing::eigen_scalar(); - auto t1 = testing::eigen_vector(); + auto pt0 = testing::eigen_scalar(); + auto pt1 = testing::eigen_vector(); + auto& t0 = *pt0; + auto& t1 = *pt1; auto& base0 = static_cast(t0); auto& base1 = static_cast(t1); @@ -63,4 +65,30 @@ TEMPLATE_LIST_TEST_CASE("Contiguous", "", testing::floating_point_types) { REQUIRE(*(std::as_const(base1).data() + 3) == TestType(3.0)); REQUIRE(*(std::as_const(base1).data() + 4) == TestType(4.0)); } + + SECTION("at()") { + REQUIRE(base0.at() == TestType(42.0)); + + REQUIRE(base1.at(0) == TestType(0.0)); + REQUIRE(base1.at(1) == TestType(1.0)); + REQUIRE(base1.at(2) == TestType(2.0)); + REQUIRE(base1.at(3) == 
TestType(3.0)); + REQUIRE(base1.at(4) == TestType(4.0)); + + REQUIRE_THROWS_AS(base0.at(0), std::runtime_error); + REQUIRE_THROWS_AS(base1.at(0, 1), std::runtime_error); + } + + SECTION("at()const") { + REQUIRE(std::as_const(base0).at() == TestType(42.0)); + + REQUIRE(std::as_const(base1).at(0) == TestType(0.0)); + REQUIRE(std::as_const(base1).at(1) == TestType(1.0)); + REQUIRE(std::as_const(base1).at(2) == TestType(2.0)); + REQUIRE(std::as_const(base1).at(3) == TestType(3.0)); + REQUIRE(std::as_const(base1).at(4) == TestType(4.0)); + + REQUIRE_THROWS_AS(std::as_const(base0).at(0), std::runtime_error); + REQUIRE_THROWS_AS(std::as_const(base1).at(0, 1), std::runtime_error); + } } \ No newline at end of file diff --git a/tests/cxx/unit_tests/tensorwrapper/buffer/detail_/eigen_tensor.cpp b/tests/cxx/unit_tests/tensorwrapper/buffer/detail_/eigen_tensor.cpp new file mode 100644 index 00000000..8518a6c7 --- /dev/null +++ b/tests/cxx/unit_tests/tensorwrapper/buffer/detail_/eigen_tensor.cpp @@ -0,0 +1,665 @@ +/* + * Copyright 2025 NWChemEx-Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "../../testing/testing.hpp" +#include + +using namespace tensorwrapper; +using namespace testing; + +template +using pimpl_type = buffer::detail_::EigenTensor; +using shape_type = shape::Smooth; + +// Should be the same regardless of template parameters +using label_type = typename pimpl_type::label_type; + +TEMPLATE_LIST_TEST_CASE("EigenTensor", "", types::floating_point_types) { + pimpl_type scalar(shape_type{}); + scalar.get_elem({}) = 1.0; + + pimpl_type vector(shape_type{2}); + vector.get_elem({0}) = 1.0; + vector.get_elem({1}) = 2.0; + + pimpl_type matrix(shape_type{2, 2}); + matrix.get_elem({0, 0}) = 1.0; + matrix.get_elem({0, 1}) = 2.0; + matrix.get_elem({1, 0}) = 3.0; + matrix.get_elem({1, 1}) = 4.0; + + pimpl_type tensor(shape_type{2, 2, 2}); + tensor.get_elem({0, 0, 0}) = 1.0; + tensor.get_elem({0, 0, 1}) = 2.0; + tensor.get_elem({0, 1, 0}) = 3.0; + tensor.get_elem({0, 1, 1}) = 4.0; + tensor.get_elem({1, 0, 0}) = 5.0; + tensor.get_elem({1, 0, 1}) = 6.0; + tensor.get_elem({1, 1, 0}) = 7.0; + tensor.get_elem({1, 1, 1}) = 8.0; + + // ------------------------------------------------------------------------- + // -- Public methods + // ------------------------------------------------------------------------- + + SECTION("operator==") { + pimpl_type scalar2(scalar); + REQUIRE(scalar2 == scalar); + + scalar2.get_elem({}) = 42.0; + REQUIRE_FALSE(scalar2 == scalar); + } + + // ------------------------------------------------------------------------- + // -- Protected methods + // ------------------------------------------------------------------------- + + SECTION("clone_") { + REQUIRE(scalar.clone()->are_equal(scalar)); + REQUIRE(vector.clone()->are_equal(vector)); + REQUIRE(matrix.clone()->are_equal(matrix)); + REQUIRE(tensor.clone()->are_equal(tensor)); + } + + SECTION("rank_") { + REQUIRE(scalar.rank() == 0); + REQUIRE(vector.rank() == 1); + REQUIRE(matrix.rank() == 2); + 
REQUIRE(tensor.rank() == 3); + + pimpl_type defaulted; + REQUIRE(defaulted.rank() == 6); + } + + SECTION("extent_") { + REQUIRE(vector.extent(0) == 2); + + REQUIRE(matrix.extent(0) == 2); + REQUIRE(matrix.extent(1) == 2); + + REQUIRE(tensor.extent(0) == 2); + REQUIRE(tensor.extent(1) == 2); + REQUIRE(tensor.extent(2) == 2); + } + + SECTION("data_()") { + REQUIRE(*scalar.data() == TestType{1.0}); + + REQUIRE(*vector.data() == TestType{1.0}); + REQUIRE(*(vector.data() + 1) == TestType{2.0}); + + REQUIRE(*matrix.data() == TestType{1.0}); + REQUIRE(*(matrix.data() + 1) == TestType{2.0}); + REQUIRE(*(matrix.data() + 2) == TestType{3.0}); + REQUIRE(*(matrix.data() + 3) == TestType{4.0}); + + REQUIRE(*tensor.data() == TestType{1.0}); + REQUIRE(*(tensor.data() + 1) == TestType{2.0}); + REQUIRE(*(tensor.data() + 2) == TestType{3.0}); + REQUIRE(*(tensor.data() + 3) == TestType{4.0}); + REQUIRE(*(tensor.data() + 4) == TestType{5.0}); + REQUIRE(*(tensor.data() + 5) == TestType{6.0}); + REQUIRE(*(tensor.data() + 6) == TestType{7.0}); + REQUIRE(*(tensor.data() + 7) == TestType{8.0}); + } + + SECTION("data_() const") { + REQUIRE(*std::as_const(scalar).data() == TestType{1.0}); + + REQUIRE(*std::as_const(vector).data() == TestType{1.0}); + REQUIRE(*(std::as_const(vector).data() + 1) == TestType{2.0}); + + REQUIRE(*std::as_const(matrix).data() == TestType{1.0}); + REQUIRE(*(std::as_const(matrix).data() + 1) == TestType{2.0}); + REQUIRE(*(std::as_const(matrix).data() + 2) == TestType{3.0}); + REQUIRE(*(std::as_const(matrix).data() + 3) == TestType{4.0}); + + REQUIRE(*std::as_const(tensor).data() == TestType{1.0}); + REQUIRE(*(std::as_const(tensor).data() + 1) == TestType{2.0}); + REQUIRE(*(std::as_const(tensor).data() + 2) == TestType{3.0}); + REQUIRE(*(std::as_const(tensor).data() + 3) == TestType{4.0}); + REQUIRE(*(std::as_const(tensor).data() + 4) == TestType{5.0}); + REQUIRE(*(std::as_const(tensor).data() + 5) == TestType{6.0}); + REQUIRE(*(std::as_const(tensor).data() + 6) == TestType{7.0}); + REQUIRE(*(std::as_const(tensor).data() + 7) == TestType{8.0}); + } + + SECTION("get_elem_ ()") { + REQUIRE(scalar.get_elem({}) == TestType{1.0}); + + REQUIRE(vector.get_elem({0}) == TestType{1.0}); + REQUIRE(vector.get_elem({1}) == TestType{2.0}); + + REQUIRE(matrix.get_elem({0, 0}) == TestType{1.0}); + REQUIRE(matrix.get_elem({0, 1}) == TestType{2.0}); + REQUIRE(matrix.get_elem({1, 0}) == TestType{3.0}); + REQUIRE(matrix.get_elem({1, 1}) == TestType{4.0}); + + REQUIRE(tensor.get_elem({0, 0, 0}) == TestType{1.0}); + REQUIRE(tensor.get_elem({0, 0, 1}) == TestType{2.0}); + REQUIRE(tensor.get_elem({0, 1, 0}) == TestType{3.0}); + REQUIRE(tensor.get_elem({0, 1, 1}) == TestType{4.0}); + REQUIRE(tensor.get_elem({1, 0, 0}) == TestType{5.0}); + REQUIRE(tensor.get_elem({1, 0, 1}) == TestType{6.0}); + REQUIRE(tensor.get_elem({1, 1, 0}) == TestType{7.0}); + REQUIRE(tensor.get_elem({1, 1, 1}) == TestType{8.0}); + } + + SECTION("get_elem_() const") { + REQUIRE(std::as_const(scalar).get_elem({}) == TestType{1.0}); + + REQUIRE(std::as_const(vector).get_elem({0}) == TestType{1.0}); + REQUIRE(std::as_const(vector).get_elem({1}) == TestType{2.0}); + + REQUIRE(std::as_const(matrix).get_elem({0, 0}) == TestType{1.0}); + REQUIRE(std::as_const(matrix).get_elem({0, 1}) == TestType{2.0}); + REQUIRE(std::as_const(matrix).get_elem({1, 0}) == TestType{3.0}); + REQUIRE(std::as_const(matrix).get_elem({1, 1}) == TestType{4.0}); + + REQUIRE(std::as_const(tensor).get_elem({0, 0, 0}) == TestType{1.0}); + REQUIRE(std::as_const(tensor).get_elem({0, 
0, 1}) == TestType{2.0}); + REQUIRE(std::as_const(tensor).get_elem({0, 1, 0}) == TestType{3.0}); + REQUIRE(std::as_const(tensor).get_elem({0, 1, 1}) == TestType{4.0}); + REQUIRE(std::as_const(tensor).get_elem({1, 0, 0}) == TestType{5.0}); + REQUIRE(std::as_const(tensor).get_elem({1, 0, 1}) == TestType{6.0}); + REQUIRE(std::as_const(tensor).get_elem({1, 1, 0}) == TestType{7.0}); + REQUIRE(std::as_const(tensor).get_elem({1, 1, 1}) == TestType{8.0}); + } + + SECTION("are_equal_") { + pimpl_type scalar2(scalar); + REQUIRE(scalar2.are_equal(scalar)); + + scalar2.get_elem({}) = 42.0; + REQUIRE_FALSE(scalar2.are_equal(scalar)); + } + + SECTION("to_string_") { + std::stringstream sone; + sone << TestType{1.0}; + + std::stringstream stwo; + stwo << TestType{2.0}; + + REQUIRE(scalar.to_string() == sone.str()); + REQUIRE(vector.to_string() == sone.str() + " " + stwo.str()); + } + + SECTION("addition_assignment_") { + SECTION("scalar") { + pimpl_type output; + label_type s(""); + output.addition_assignment(s, s, s, scalar, scalar); + + pimpl_type corr(shape_type{}); + corr.get_elem({}) = 2.0; + REQUIRE(output == corr); + } + + SECTION("tensor : permute none") { + pimpl_type output; + label_type o("i,j,k"); + label_type l("i,j,k"); + label_type r("i,j,k"); + + output.addition_assignment(o, l, r, tensor, tensor); + + pimpl_type corr(shape_type{2, 2, 2}); + corr.get_elem({0, 0, 0}) = 2.0; + corr.get_elem({0, 0, 1}) = 4.0; + corr.get_elem({0, 1, 0}) = 6.0; + corr.get_elem({0, 1, 1}) = 8.0; + corr.get_elem({1, 0, 0}) = 10.0; + corr.get_elem({1, 0, 1}) = 12.0; + corr.get_elem({1, 1, 0}) = 14.0; + corr.get_elem({1, 1, 1}) = 16.0; + REQUIRE(output == corr); + } + + SECTION("tensor : permute LHS") { + pimpl_type output; + label_type o("k,j,i"); + label_type l("i,j,k"); + label_type r("k,j,i"); + + output.addition_assignment(o, l, r, tensor, tensor); + + pimpl_type corr(shape_type{2, 2, 2}); + corr.get_elem({0, 0, 0}) = 2.0; + corr.get_elem({0, 0, 1}) = 8.0; + corr.get_elem({0, 1, 0}) = 8.0; + corr.get_elem({0, 1, 1}) = 14.0; + corr.get_elem({1, 0, 0}) = 4.0; + corr.get_elem({1, 0, 1}) = 10.0; + corr.get_elem({1, 1, 0}) = 10.0; + corr.get_elem({1, 1, 1}) = 16.0; + REQUIRE(output == corr); + } + + SECTION("tensor : permute RHS") { + pimpl_type output; + label_type o("k,j,i"); + label_type l("k,j,i"); + label_type r("i,j,k"); + + output.addition_assignment(o, l, r, tensor, tensor); + + pimpl_type corr(shape_type{2, 2, 2}); + corr.get_elem({0, 0, 0}) = 2.0; + corr.get_elem({0, 0, 1}) = 8.0; + corr.get_elem({0, 1, 0}) = 8.0; + corr.get_elem({0, 1, 1}) = 14.0; + corr.get_elem({1, 0, 0}) = 4.0; + corr.get_elem({1, 0, 1}) = 10.0; + corr.get_elem({1, 1, 0}) = 10.0; + corr.get_elem({1, 1, 1}) = 16.0; + REQUIRE(output == corr); + } + + SECTION("tensor : permute all") { + pimpl_type output; + label_type o("k,j,i"); + label_type l("i,j,k"); + label_type r("j,i,k"); + + output.addition_assignment(o, l, r, tensor, tensor); + + pimpl_type corr(shape_type{2, 2, 2}); + corr.get_elem({0, 0, 0}) = 2.0; + corr.get_elem({0, 0, 1}) = 8.0; + corr.get_elem({0, 1, 0}) = 8.0; + corr.get_elem({0, 1, 1}) = 14.0; + corr.get_elem({1, 0, 0}) = 4.0; + corr.get_elem({1, 0, 1}) = 10.0; + corr.get_elem({1, 1, 0}) = 10.0; + corr.get_elem({1, 1, 1}) = 16.0; + REQUIRE(output == corr); + } + } + + SECTION("subtraction_assignment_") { + SECTION("scalar") { + pimpl_type output; + label_type s(""); + output.subtraction_assignment(s, s, s, scalar, scalar); + + pimpl_type corr(shape_type{}); + corr.get_elem({}) = 0.0; + REQUIRE(output == corr); + } + + 
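// Index bookkeeping for the permuted subsections below (worked from the + // tensor fixture above, not new API): in "permute RHS", o = l = "k,j,i" + // and r = "i,j,k", so the output element at (0,0,1) is + // tensor.get_elem({0, 0, 1}) - tensor.get_elem({1, 0, 0}) = 2.0 - 5.0 + // = -3.0, matching the corr value asserted there. + + 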
SECTION("tensor : permute none") { + pimpl_type output; + label_type o("i,j,k"); + label_type l("i,j,k"); + label_type r("i,j,k"); + + output.subtraction_assignment(o, l, r, tensor, tensor); + + pimpl_type corr(shape_type{2, 2, 2}); + corr.get_elem({0, 0, 0}) = 0.0; + corr.get_elem({0, 0, 1}) = 0.0; + corr.get_elem({0, 1, 0}) = 0.0; + corr.get_elem({0, 1, 1}) = 0.0; + corr.get_elem({1, 0, 0}) = 0.0; + corr.get_elem({1, 0, 1}) = 0.0; + corr.get_elem({1, 1, 0}) = 0.0; + corr.get_elem({1, 1, 1}) = 0.0; + REQUIRE(output == corr); + } + + SECTION("tensor : permute LHS") { + pimpl_type output; + label_type o("k,j,i"); + label_type l("i,j,k"); + label_type r("k,j,i"); + + output.subtraction_assignment(o, l, r, tensor, tensor); + + pimpl_type corr(shape_type{2, 2, 2}); + corr.get_elem({0, 0, 0}) = 0.0; + corr.get_elem({0, 0, 1}) = 3.0; + corr.get_elem({0, 1, 0}) = 0.0; + corr.get_elem({0, 1, 1}) = 3.0; + corr.get_elem({1, 0, 0}) = -3.0; + corr.get_elem({1, 0, 1}) = 0.0; + corr.get_elem({1, 1, 0}) = -3.0; + corr.get_elem({1, 1, 1}) = 0.0; + REQUIRE(output == corr); + } + + SECTION("tensor : permute RHS") { + pimpl_type output; + label_type o("k,j,i"); + label_type l("k,j,i"); + label_type r("i,j,k"); + + output.subtraction_assignment(o, l, r, tensor, tensor); + + pimpl_type corr(shape_type{2, 2, 2}); + corr.get_elem({0, 0, 0}) = 0.0; + corr.get_elem({0, 0, 1}) = -3.0; + corr.get_elem({0, 1, 0}) = 0.0; + corr.get_elem({0, 1, 1}) = -3.0; + corr.get_elem({1, 0, 0}) = 3.0; + corr.get_elem({1, 0, 1}) = 0.0; + corr.get_elem({1, 1, 0}) = 3.0; + corr.get_elem({1, 1, 1}) = 0.0; + REQUIRE(output == corr); + } + + SECTION("tensor : permute all") { + pimpl_type output; + label_type o("k,j,i"); + label_type l("i,j,k"); + label_type r("j,i,k"); + + output.subtraction_assignment(o, l, r, tensor, tensor); + + pimpl_type corr(shape_type{2, 2, 2}); + corr.get_elem({0, 0, 0}) = 0.0; + corr.get_elem({0, 0, 1}) = 2.0; + corr.get_elem({0, 1, 0}) = -2.0; + corr.get_elem({0, 1, 1}) = 0.0; + corr.get_elem({1, 0, 0}) = 0.0; + corr.get_elem({1, 0, 1}) = 2.0; + corr.get_elem({1, 1, 0}) = -2.0; + corr.get_elem({1, 1, 1}) = 0.0; + REQUIRE(output == corr); + } + } + + SECTION("hadamard_assignment_") { + SECTION("scalar") { + pimpl_type output; + label_type s(""); + output.hadamard_assignment(s, s, s, scalar, scalar); + + pimpl_type corr(shape_type{}); + corr.get_elem({}) = 1.0; + REQUIRE(output == corr); + } + + SECTION("tensor : permute none") { + pimpl_type output; + label_type o("i,j,k"); + label_type l("i,j,k"); + label_type r("i,j,k"); + + output.hadamard_assignment(o, l, r, tensor, tensor); + + pimpl_type corr(shape_type{2, 2, 2}); + corr.get_elem({0, 0, 0}) = 1.0; + corr.get_elem({0, 0, 1}) = 4.0; + corr.get_elem({0, 1, 0}) = 9.0; + corr.get_elem({0, 1, 1}) = 16.0; + corr.get_elem({1, 0, 0}) = 25.0; + corr.get_elem({1, 0, 1}) = 36.0; + corr.get_elem({1, 1, 0}) = 49.0; + corr.get_elem({1, 1, 1}) = 64.0; + REQUIRE(output == corr); + } + + SECTION("tensor : permute LHS") { + pimpl_type output; + label_type o("k,j,i"); + label_type l("i,j,k"); + label_type r("k,j,i"); + + output.hadamard_assignment(o, l, r, tensor, tensor); + + pimpl_type corr(shape_type{2, 2, 2}); + corr.get_elem({0, 0, 0}) = 1.0; + corr.get_elem({0, 0, 1}) = 10.0; + corr.get_elem({0, 1, 0}) = 9.0; + corr.get_elem({0, 1, 1}) = 28.0; + corr.get_elem({1, 0, 0}) = 10.0; + corr.get_elem({1, 0, 1}) = 36.0; + corr.get_elem({1, 1, 0}) = 28.0; + corr.get_elem({1, 1, 1}) = 64.0; + REQUIRE(output == corr); + } + + SECTION("tensor : permute RHS") { + pimpl_type output; 
+ label_type o("k,j,i"); + label_type l("k,j,i"); + label_type r("i,j,k"); + + output.hadamard_assignment(o, l, r, tensor, tensor); + + pimpl_type corr(shape_type{2, 2, 2}); + corr.get_elem({0, 0, 0}) = 1.0; + corr.get_elem({0, 0, 1}) = 10.0; + corr.get_elem({0, 1, 0}) = 9.0; + corr.get_elem({0, 1, 1}) = 28.0; + corr.get_elem({1, 0, 0}) = 10.0; + corr.get_elem({1, 0, 1}) = 36.0; + corr.get_elem({1, 1, 0}) = 28.0; + corr.get_elem({1, 1, 1}) = 64.0; + REQUIRE(output == corr); + } + + SECTION("tensor : permute all") { + pimpl_type output; + label_type o("k,j,i"); + label_type l("i,j,k"); + label_type r("j,i,k"); + + output.hadamard_assignment(o, l, r, tensor, tensor); + + pimpl_type corr(shape_type{2, 2, 2}); + corr.get_elem({0, 0, 0}) = 1.0; + corr.get_elem({0, 0, 1}) = 15.0; + corr.get_elem({0, 1, 0}) = 15.0; + corr.get_elem({0, 1, 1}) = 49.0; + corr.get_elem({1, 0, 0}) = 4.0; + corr.get_elem({1, 0, 1}) = 24.0; + corr.get_elem({1, 1, 0}) = 24.0; + corr.get_elem({1, 1, 1}) = 64.0; + REQUIRE(output == corr); + } + } + + SECTION("contraction_assignment") { + SECTION("ijk,ijk->") { + pimpl_type output; + + label_type o(""); + label_type l("i,j,k"); + label_type r("i,j,k"); + shape_type oshape{}; + output.contraction_assignment(o, l, r, oshape, tensor, tensor); + + pimpl_type corr(oshape); + corr.get_elem({}) = 204.0; + REQUIRE(output == corr); + } + + SECTION("ijk,jik->") { + pimpl_type output; + + label_type o(""); + label_type l("i,j,k"); + label_type r("j,i,k"); + shape_type oshape{}; + output.contraction_assignment(o, l, r, oshape, tensor, tensor); + + pimpl_type corr(oshape); + corr.get_elem({}) = 196.0; + REQUIRE(output == corr); + } + + SECTION("ijk,jkl->il") { + pimpl_type output; + + label_type o("i,l"); + label_type l("i,j,k"); + label_type r("j,k,l"); + shape_type oshape{2, 2}; + output.contraction_assignment(o, l, r, oshape, tensor, tensor); + + pimpl_type corr(oshape); + corr.get_elem({0, 0}) = 50.0; + corr.get_elem({0, 1}) = 60.0; + corr.get_elem({1, 0}) = 114.0; + corr.get_elem({1, 1}) = 140.0; + REQUIRE(output == corr); + } + + SECTION("ijk,jlk->il") { + pimpl_type output; + + label_type o("i,l"); + label_type l("i,j,k"); + label_type r("j,l,k"); + shape_type oshape{2, 2}; + output.contraction_assignment(o, l, r, oshape, tensor, tensor); + + pimpl_type corr(oshape); + corr.get_elem({0, 0}) = 44.0; + corr.get_elem({0, 1}) = 64.0; + corr.get_elem({1, 0}) = 100.0; + corr.get_elem({1, 1}) = 152.0; + REQUIRE(output == corr); + } + + SECTION("ijk,jlk->li") { + pimpl_type output; + + label_type o("l,i"); + label_type l("i,j,k"); + label_type r("j,l,k"); + shape_type oshape{2, 2}; + output.contraction_assignment(o, l, r, oshape, tensor, tensor); + + pimpl_type corr(oshape); + corr.get_elem({0, 0}) = 44.0; + corr.get_elem({0, 1}) = 100.0; + corr.get_elem({1, 0}) = 64.0; + corr.get_elem({1, 1}) = 152.0; + REQUIRE(output == corr); + } + + SECTION("ijk,ljm->iklm") { + pimpl_type output; + + label_type o("i,k,l,m"); + label_type l("i,j,k"); + label_type r("l,j,m"); + shape_type oshape{2, 2, 2, 2}; + output.contraction_assignment(o, l, r, oshape, tensor, tensor); + + pimpl_type corr(oshape); + corr.get_elem({0, 0, 0, 0}) = 10.0; + corr.get_elem({0, 0, 0, 1}) = 14.0; + corr.get_elem({0, 0, 1, 0}) = 26.0; + corr.get_elem({0, 0, 1, 1}) = 30.0; + corr.get_elem({0, 1, 0, 0}) = 14.0; + corr.get_elem({0, 1, 0, 1}) = 20.0; + corr.get_elem({0, 1, 1, 0}) = 38.0; + corr.get_elem({0, 1, 1, 1}) = 44.0; + corr.get_elem({1, 0, 0, 0}) = 26.0; + corr.get_elem({1, 0, 0, 1}) = 38.0; + corr.get_elem({1, 0, 1, 
0}) = 74.0; + corr.get_elem({1, 0, 1, 1}) = 86.0; + corr.get_elem({1, 1, 0, 0}) = 30.0; + corr.get_elem({1, 1, 0, 1}) = 44.0; + corr.get_elem({1, 1, 1, 0}) = 86.0; + corr.get_elem({1, 1, 1, 1}) = 100.0; + + REQUIRE(output == corr); + } + + SECTION("ij,jkl->ikl") { + pimpl_type output; + + label_type o("i,k,l"); + label_type l("i,j"); + label_type r("j,k,l"); + shape_type oshape{2, 2, 2}; + output.contraction_assignment(o, l, r, oshape, matrix, tensor); + + pimpl_type corr(oshape); + corr.get_elem({0, 0, 0}) = 11.0; + corr.get_elem({0, 0, 1}) = 14.0; + corr.get_elem({0, 1, 0}) = 17.0; + corr.get_elem({0, 1, 1}) = 20.0; + corr.get_elem({1, 0, 0}) = 23.0; + corr.get_elem({1, 0, 1}) = 30.0; + corr.get_elem({1, 1, 0}) = 37.0; + corr.get_elem({1, 1, 1}) = 44.0; + + REQUIRE(corr == output); + } + } + + SECTION("permute_assignment") { + pimpl_type output; + + SECTION("matrix : no permute") { + label_type o("i,j"); + label_type i("i,j"); + output.permute_assignment(o, i, matrix); + + REQUIRE(output == matrix); + } + + SECTION("matrix : permute") { + label_type o("i,j"); + label_type i("j,i"); + output.permute_assignment(o, i, matrix); + + pimpl_type corr(shape_type{2, 2}); + corr.get_elem({0, 0}) = 1.0; + corr.get_elem({0, 1}) = 3.0; + corr.get_elem({1, 0}) = 2.0; + corr.get_elem({1, 1}) = 4.0; + REQUIRE(output == corr); + } + } + + SECTION("scalar_multiplication") { + pimpl_type output; + + SECTION("matrix : no permute") { + label_type o("i,j"); + label_type i("i,j"); + output.scalar_multiplication(o, i, 2.0, matrix); + + pimpl_type corr(shape_type{2, 2}); + corr.get_elem({0, 0}) = 2.0; + corr.get_elem({0, 1}) = 4.0; + corr.get_elem({1, 0}) = 6.0; + corr.get_elem({1, 1}) = 8.0; + + REQUIRE(output == corr); + } + + SECTION("matrix : permute") { + label_type o("i,j"); + label_type i("j,i"); + output.scalar_multiplication(o, i, 2.0, matrix); + + pimpl_type corr(shape_type{2, 2}); + corr.get_elem({0, 0}) = 2.0; + corr.get_elem({0, 1}) = 6.0; + corr.get_elem({1, 0}) = 4.0; + corr.get_elem({1, 1}) = 8.0; + REQUIRE(output == corr); + } + } +} diff --git a/tests/cxx/unit_tests/tensorwrapper/buffer/eigen.cpp b/tests/cxx/unit_tests/tensorwrapper/buffer/eigen.cpp index 1e52cc3a..c8d369b8 100644 --- a/tests/cxx/unit_tests/tensorwrapper/buffer/eigen.cpp +++ b/tests/cxx/unit_tests/tensorwrapper/buffer/eigen.cpp @@ -22,678 +22,191 @@ using namespace tensorwrapper; using namespace testing; -namespace { - -template -void compare_eigen(const LHSType& lhs, const RHSType& rhs) { - using r_type = Eigen::Tensor; - auto d = lhs - rhs; - r_type r = d.sum(); - // sigma's types are the only none fundamental options here currently - // This may need to be adjusted if other custom numeric types are added - if constexpr(std::is_fundamental_v) { - REQUIRE_THAT(r() + 1.0, Catch::Matchers::WithinAbs(1.0, 1E-6)); - } else { - REQUIRE_THAT(r().mean() + 1.0, Catch::Matchers::WithinAbs(1.0, 1E-6)); - REQUIRE_THAT(r().sd() + 1.0, Catch::Matchers::WithinAbs(1.0, 1E-6)); - } -} - -} // namespace - -TEMPLATE_LIST_TEST_CASE("Eigen", "", testing::floating_point_types) { - using scalar_buffer = buffer::Eigen; - using vector_buffer = buffer::Eigen; - using matrix_buffer = buffer::Eigen; - using tensor_buffer = buffer::Eigen; - - typename scalar_buffer::data_type eigen_scalar; - eigen_scalar() = 10.0; - - typename vector_buffer::data_type eigen_vector(2); - eigen_vector(0) = 10.0; - eigen_vector(1) = 20.0; - - typename matrix_buffer::data_type eigen_matrix(2, 3); - eigen_matrix(0, 0) = 10.0; - eigen_matrix(0, 1) = 20.0; - eigen_matrix(0, 2) 
= 30.0; - eigen_matrix(1, 0) = 40.0; - eigen_matrix(1, 1) = 50.0; - eigen_matrix(1, 2) = 60.0; - - typename tensor_buffer::data_type eigen_tensor(1, 2, 3); - eigen_tensor(0, 0, 0) = 10.0; - eigen_tensor(0, 0, 1) = 20.0; - eigen_tensor(0, 0, 2) = 30.0; - eigen_tensor(0, 1, 0) = 40.0; - eigen_tensor(0, 1, 1) = 50.0; - eigen_tensor(0, 1, 2) = 60.0; +TEMPLATE_LIST_TEST_CASE("Eigen", "", types::floating_point_types) { + // N.B. we actually get Contiguous objects back + using buffer_type = buffer::Eigen<TestType>; + + auto pscalar = testing::eigen_scalar<TestType>(); + auto& eigen_scalar = static_cast<buffer_type&>(*pscalar); + eigen_scalar.at() = 10.0; + + auto pvector = testing::eigen_vector<TestType>(2); + auto& eigen_vector = static_cast<buffer_type&>(*pvector); + eigen_vector.at(0) = 10.0; + eigen_vector.at(1) = 20.0; + + auto pmatrix = testing::eigen_matrix<TestType>(2, 3); + auto& eigen_matrix = static_cast<buffer_type&>(*pmatrix); + eigen_matrix.at(0, 0) = 10.0; + eigen_matrix.at(0, 1) = 20.0; + eigen_matrix.at(0, 2) = 30.0; + eigen_matrix.at(1, 0) = 40.0; + eigen_matrix.at(1, 1) = 50.0; + eigen_matrix.at(1, 2) = 60.0; + + auto ptensor = testing::eigen_tensor3<TestType>(1, 2, 3); + auto& eigen_tensor = static_cast<buffer_type&>(*ptensor); + eigen_tensor.at(0, 0, 0) = 10.0; + eigen_tensor.at(0, 0, 1) = 20.0; + eigen_tensor.at(0, 0, 2) = 30.0; + eigen_tensor.at(0, 1, 0) = 40.0; + eigen_tensor.at(0, 1, 1) = 50.0; + eigen_tensor.at(0, 1, 2) = 60.0; auto scalar_layout = scalar_physical(); auto vector_layout = vector_physical(2); auto matrix_layout = matrix_physical(2, 3); auto tensor_layout = tensor3_physical(1, 2, 3); - scalar_buffer scalar(eigen_scalar, scalar_layout); - vector_buffer vector(eigen_vector, vector_layout); - matrix_buffer matrix(eigen_matrix, matrix_layout); - tensor_buffer tensor(eigen_tensor, tensor_layout); + buffer_type defaulted; SECTION("ctors, assignment") { - SECTION("value ctor") { - compare_eigen(scalar.value(), eigen_scalar); - REQUIRE(scalar.layout().are_equal(scalar_layout)); + SECTION("default ctor") { REQUIRE(defaulted.data() == nullptr); } - compare_eigen(vector.value(), eigen_vector); - REQUIRE(vector.layout().are_equal(vector_layout)); - - compare_eigen(matrix.value(), eigen_matrix); - REQUIRE(matrix.layout().are_equal(matrix_layout)); + SECTION("value ctor") { + REQUIRE(eigen_scalar.layout().are_equal(scalar_layout)); + REQUIRE(eigen_vector.layout().are_equal(vector_layout)); + REQUIRE(eigen_matrix.layout().are_equal(matrix_layout)); + REQUIRE(eigen_tensor.layout().are_equal(tensor_layout)); } - test_copy_move_ctor_and_assignment(scalar, vector, matrix); + test_copy_move_ctor_and_assignment(eigen_scalar, eigen_vector, + eigen_matrix, eigen_tensor); } - SECTION("value()") { - compare_eigen(scalar.value(), eigen_scalar); - compare_eigen(vector.value(), eigen_vector); - compare_eigen(matrix.value(), eigen_matrix); + SECTION("swap") { + buffer_type copy(eigen_scalar); + eigen_scalar.swap(defaulted); + REQUIRE(defaulted == copy); + REQUIRE(eigen_scalar == buffer_type{}); } - SECTION("value() const") { - const auto& cscalar = scalar; - const auto& cvector = vector; - const auto& cmatrix = matrix; - compare_eigen(cscalar.value(), eigen_scalar); - compare_eigen(cvector.value(), eigen_vector); - compare_eigen(cmatrix.value(), eigen_matrix); } SECTION("operator==") { - // We assume the eigen tensor and the layout objects work. So when - // comparing Eigen objects we have four states: same everything, - // different everything, same tensor different layout, and different - // tensor same layout. 
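+ // A worked reading of the cases kept below (uses only fixtures from + // this test): eigen_scalar and eigen_scalar2 share a layout and both + // hold 10.0, so they compare equal; after eigen_scalar2.at() = 2.0 the + // layouts still agree but the values differ, so operator== is false. 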
+ // Checking Layout/Allocator falls to base class tests + auto pscalar2 = testing::eigen_scalar(); + auto& eigen_scalar2 = static_cast(*pscalar2); + eigen_scalar2.at() = 10.0; - typename scalar_buffer::data_type eigen_scalar2; - eigen_scalar2() = 10.0; + // Defaulted != scalar + REQUIRE_FALSE(defaulted == eigen_scalar); // Everything the same - REQUIRE(scalar == scalar_buffer(eigen_scalar2, scalar_layout)); - - SECTION("Different scalar") { - eigen_scalar2() = 2.0; - scalar_buffer scalar2(eigen_scalar2, scalar_layout); - REQUIRE_FALSE(scalar == scalar2); - } + REQUIRE(eigen_scalar == eigen_scalar2); - SECTION("Different layout") { - scalar_buffer scalar2(eigen_scalar, vector_layout); - REQUIRE_FALSE(scalar == scalar2); - } - - SECTION("Different tensor and layout") { - eigen_scalar2() = 2.0; - scalar_buffer scalar2(eigen_scalar2, vector_layout); - REQUIRE_FALSE(scalar == scalar2); + SECTION("Different buffer value") { + eigen_scalar2.at() = 2.0; + REQUIRE_FALSE(eigen_scalar == eigen_scalar2); } } SECTION("operator!=") { - // This just negates operator== so spot-checking is okay - - typename scalar_buffer::data_type eigen_scalar2; - eigen_scalar2() = 10.0; - - // Everything the same - scalar_buffer scalar2(eigen_scalar2, scalar_layout); - REQUIRE_FALSE(scalar != scalar2); + auto pscalar2 = testing::eigen_scalar(); + auto& eigen_scalar2 = static_cast(*pscalar2); + eigen_scalar2.at() = 10.0; - eigen_scalar2() = 2.0; - scalar_buffer scalar3(eigen_scalar2, scalar_layout); - REQUIRE(scalar3 != scalar); + REQUIRE_FALSE(eigen_scalar != eigen_scalar2); + eigen_scalar2.at() = 2.0; + REQUIRE(eigen_scalar != eigen_scalar2); } SECTION("virtual method overrides") { - using const_reference = - typename scalar_buffer::const_buffer_base_reference; - const_reference pscalar = scalar; - const_reference pvector = vector; - const_reference pmatrix = matrix; - SECTION("clone") { - REQUIRE(pscalar.clone()->are_equal(pscalar)); - REQUIRE(pvector.clone()->are_equal(pvector)); - REQUIRE(pmatrix.clone()->are_equal(pmatrix)); + REQUIRE(eigen_scalar.clone()->are_equal(eigen_scalar)); + REQUIRE(eigen_vector.clone()->are_equal(eigen_vector)); + REQUIRE(eigen_matrix.clone()->are_equal(eigen_matrix)); } SECTION("are_equal") { - scalar_buffer scalar2(eigen_scalar, scalar_layout); - REQUIRE(pscalar.are_equal(scalar2)); - REQUIRE_FALSE(pmatrix.are_equal(scalar2)); - } - } - - SECTION("addition_assignment_") { - SECTION("scalar") { - scalar_buffer scalar2(eigen_scalar, scalar_layout); - scalar2.value()() = 42.0; - - auto s = scalar(""); - auto pscalar2 = &(scalar2.addition_assignment("", s, s)); - - scalar_buffer scalar_corr(eigen_scalar, scalar_layout); - scalar_corr.value()() = 20.0; - REQUIRE(pscalar2 == &scalar2); - REQUIRE(scalar2 == scalar_corr); - } - - SECTION("vector") { - auto vector2 = testing::eigen_vector(); - - auto vi = vector("i"); - auto pvector2 = &(vector2.addition_assignment("i", vi, vi)); - - vector_buffer vector_corr(eigen_vector, vector_layout); - vector_corr.value()(0) = 20.0; - vector_corr.value()(1) = 40.0; - - REQUIRE(pvector2 == &vector2); - REQUIRE(vector2 == vector_corr); - } - - SECTION("matrix : no permutation") { - auto matrix2 = testing::eigen_matrix(); - - auto mij = matrix("i,j"); - auto pmatrix2 = &(matrix2.addition_assignment("i,j", mij, mij)); - - matrix_buffer matrix_corr(eigen_matrix, matrix_layout); - - matrix_corr.value()(0, 0) = 20.0; - matrix_corr.value()(0, 1) = 40.0; - matrix_corr.value()(0, 2) = 60.0; - matrix_corr.value()(1, 0) = 80.0; - matrix_corr.value()(1, 1) = 100.0; - 
matrix_corr.value()(1, 2) = 120.0; - - REQUIRE(pmatrix2 == &matrix2); - REQUIRE(matrix2 == matrix_corr); - } - - SECTION("matrix: permutations") { - auto matrix2 = testing::eigen_matrix(); - auto l = testing::matrix_physical(3, 2); - std::array p10{1, 0}; - auto eigen_matrix_t = eigen_matrix.shuffle(p10); - matrix_buffer matrix1(eigen_matrix_t, l); - - auto mij = matrix("i,j"); - auto mji = matrix1("j,i"); - - matrix_buffer matrix_corr(eigen_matrix, matrix_layout); - - matrix_corr.value()(0, 0) = 20.0; - matrix_corr.value()(0, 1) = 40.0; - matrix_corr.value()(0, 2) = 60.0; - matrix_corr.value()(1, 0) = 80.0; - matrix_corr.value()(1, 1) = 100.0; - matrix_corr.value()(1, 2) = 120.0; - - SECTION("permute this") { - matrix2.addition_assignment("j,i", mij, mij); - - matrix_buffer corr(eigen_matrix_t, l); - corr.value()(0, 0) = 20.0; - corr.value()(0, 1) = 80.0; - corr.value()(1, 0) = 40.0; - corr.value()(1, 1) = 100.0; - corr.value()(2, 0) = 60.0; - corr.value()(2, 1) = 120.0; - - REQUIRE(matrix2 == corr); - } - - SECTION("permute LHS") { - matrix2.addition_assignment("i,j", mji, mij); - REQUIRE(matrix2 == matrix_corr); - } - - SECTION("permute RHS") { - matrix2.addition_assignment("i,j", mij, mji); - REQUIRE(matrix2 == matrix_corr); - } - } - - SECTION("tensor (must permute all)") { - auto tensor2 = testing::eigen_tensor3(); - - std::array p102{1, 0, 2}; - auto l102 = testing::tensor3_physical(2, 1, 3); - tensor_buffer tensor102(eigen_tensor.shuffle(p102), l102); - - auto tijk = tensor("i,j,k"); - auto tjik = tensor102("j,i,k"); - - tensor2.addition_assignment("k,j,i", tijk, tjik); - - std::array p210{2, 1, 0}; - auto l210 = testing::tensor3_physical(3, 2, 1); - tensor_buffer corr(eigen_tensor.shuffle(p210), l210); - corr.value()(0, 0, 0) = 20.0; - corr.value()(0, 1, 0) = 80.0; - corr.value()(1, 0, 0) = 40.0; - corr.value()(1, 1, 0) = 100.0; - corr.value()(2, 0, 0) = 60.0; - corr.value()(2, 1, 0) = 120.0; - REQUIRE(tensor2 == corr); - } - } - - SECTION("subtraction_assignment_") { - SECTION("scalar") { - scalar_buffer scalar2(eigen_scalar, scalar_layout); - scalar2.value()() = 42.0; - - auto s = scalar(""); - auto pscalar2 = &(scalar2.subtraction_assignment("", s, s)); - - scalar_buffer scalar_corr(eigen_scalar, scalar_layout); - scalar_corr.value()() = 0.0; - REQUIRE(pscalar2 == &scalar2); - REQUIRE(scalar2 == scalar_corr); - } - - SECTION("vector") { - auto vector2 = testing::eigen_vector(); - - auto vi = vector("i"); - auto pvector2 = &(vector2.subtraction_assignment("i", vi, vi)); - - vector_buffer vector_corr(eigen_vector, vector_layout); - vector_corr.value()(0) = 0.0; - vector_corr.value()(1) = 0.0; - - REQUIRE(pvector2 == &vector2); - REQUIRE(vector2 == vector_corr); - } - - SECTION("matrix : no permutation") { - auto matrix2 = testing::eigen_matrix(); - - auto mij = matrix("i,j"); - auto pmatrix2 = &(matrix2.subtraction_assignment("i,j", mij, mij)); - - matrix_buffer matrix_corr(eigen_matrix, matrix_layout); - - matrix_corr.value()(0, 0) = 0.0; - matrix_corr.value()(0, 1) = 0.0; - matrix_corr.value()(0, 2) = 0.0; - matrix_corr.value()(1, 0) = 0.0; - matrix_corr.value()(1, 1) = 0.0; - matrix_corr.value()(1, 2) = 0.0; - - REQUIRE(pmatrix2 == &matrix2); - REQUIRE(matrix2 == matrix_corr); - } - - SECTION("matrix: permutations") { - auto matrix2 = testing::eigen_matrix(); - auto l = testing::matrix_physical(3, 2); - std::array p10{1, 0}; - auto eigen_matrix_t = eigen_matrix.shuffle(p10); - matrix_buffer matrix1(eigen_matrix_t, l); - - auto mij = matrix("i,j"); - auto mji = matrix1("j,i"); 
- - matrix_buffer matrix_corr(eigen_matrix, matrix_layout); - - matrix_corr.value()(0, 0) = 0.0; - matrix_corr.value()(0, 1) = 0.0; - matrix_corr.value()(0, 2) = 0.0; - matrix_corr.value()(1, 0) = 0.0; - matrix_corr.value()(1, 1) = 0.0; - matrix_corr.value()(1, 2) = 0.0; - - SECTION("permute this") { - matrix2.subtraction_assignment("j,i", mij, mij); - - matrix_buffer corr(eigen_matrix_t, l); - corr.value()(0, 0) = 0.0; - corr.value()(0, 1) = 0.0; - corr.value()(1, 0) = 0.0; - corr.value()(1, 1) = 0.0; - corr.value()(2, 0) = 0.0; - corr.value()(2, 1) = 0.0; - - REQUIRE(matrix2 == corr); - } - - SECTION("permute LHS") { - matrix2.subtraction_assignment("i,j", mji, mij); - REQUIRE(matrix2 == matrix_corr); - } - - SECTION("permute RHS") { - matrix2.subtraction_assignment("i,j", mij, mji); - REQUIRE(matrix2 == matrix_corr); - } - } - - SECTION("tensor (must permute all)") { - auto tensor2 = testing::eigen_tensor3(); - - std::array p102{1, 0, 2}; - auto l102 = testing::tensor3_physical(2, 1, 3); - tensor_buffer tensor102(eigen_tensor.shuffle(p102), l102); - - auto tijk = tensor("i,j,k"); - auto tjik = tensor102("j,i,k"); - - tensor2.subtraction_assignment("k,j,i", tijk, tjik); - - std::array p210{2, 1, 0}; - auto l210 = testing::tensor3_physical(3, 2, 1); - tensor_buffer corr(eigen_tensor.shuffle(p210), l210); - corr.value()(0, 0, 0) = 0.0; - corr.value()(0, 1, 0) = 0.0; - corr.value()(1, 0, 0) = 0.0; - corr.value()(1, 1, 0) = 0.0; - corr.value()(2, 0, 0) = 0.0; - corr.value()(2, 1, 0) = 0.0; - REQUIRE(tensor2 == corr); - } - } - - SECTION("multiplication_assignment_") { - // Multiplication just dispatches to hadamard_ or contraction_ - // Here we test the error-handling - - // Must be either a pure hadamard or a pure contraction - auto matrix2 = testing::eigen_matrix(); - auto mij = matrix("i,j"); - - REQUIRE_THROWS_AS(matrix2.subtraction_assignment("i", mij, mij), - std::runtime_error); - } - - SECTION("permute_assignment_") { - SECTION("scalar") { - auto scalar2 = testing::eigen_scalar(); - scalar2.value()() = 42.0; - - auto s = scalar(""); - auto pscalar2 = &(scalar2.permute_assignment("", s)); - REQUIRE(pscalar2 == &scalar2); - REQUIRE(scalar2 == scalar); + REQUIRE(eigen_scalar.are_equal(eigen_scalar)); + REQUIRE_FALSE(eigen_matrix.are_equal(eigen_scalar)); } - SECTION("vector") { - auto vector2 = testing::eigen_vector(); + SECTION("addition_assignment") { + buffer_type output; + auto vi = eigen_vector("i"); + output.addition_assignment("i", vi, vi); - auto vi = vector("i"); - auto pvector2 = &(vector2.permute_assignment("i", vi)); + auto corr = testing::eigen_vector(2); + corr->at(0) = 20.0; + corr->at(1) = 40.0; - REQUIRE(pvector2 == &vector2); - REQUIRE(vector2 == vector); + REQUIRE(output.are_equal(*corr)); } - SECTION("matrix : no permutation") { - auto matrix2 = testing::eigen_matrix(); + SECTION("subtraction_assignment") { + buffer_type output; + auto vi = eigen_vector("i"); + output.subtraction_assignment("i", vi, vi); - auto mij = matrix("i,j"); - auto pmatrix2 = &(matrix2.permute_assignment("i,j", mij)); + auto corr = testing::eigen_vector(2); + corr->at(0) = 0.0; + corr->at(1) = 0.0; - REQUIRE(pmatrix2 == &matrix2); - REQUIRE(matrix2 == matrix); + REQUIRE(output.are_equal(*corr)); } - SECTION("matrix: permutation") { - auto matrix2 = testing::eigen_matrix(); - auto p = &(matrix2.permute_assignment("j,i", matrix("i,j"))); - - auto corr = testing::eigen_matrix(3, 2); - corr.value()(0, 0) = 10.0; - corr.value()(1, 0) = 20.0; - corr.value()(2, 0) = 30.0; - corr.value()(0, 1) = 40.0; 
- corr.value()(1, 1) = 50.0; - corr.value()(2, 1) = 60.0; - REQUIRE(p == &matrix2); - compare_eigen(corr.value(), matrix2.value()); - } - } - - SECTION("scalar_multiplication_") { - SECTION("scalar") { - auto scalar2 = testing::eigen_scalar(); - scalar2.value()() = 42.0; + SECTION("multiplication_assignment") { + buffer_type output; + auto vi = eigen_vector("i"); + output.multiplication_assignment("i", vi, vi); - auto s = scalar(""); - auto pscalar2 = &(scalar2.scalar_multiplication("", 2.0, s)); + auto corr = testing::eigen_vector(2); + corr->at(0) = 100.0; + corr->at(1) = 400.0; - auto corr = testing::eigen_scalar(); - corr.value()() = 20.0; - REQUIRE(pscalar2 == &scalar2); - REQUIRE(scalar2 == corr); + REQUIRE(output.are_equal(*corr)); } - SECTION("vector") { - auto vector2 = testing::eigen_vector(); - - auto vi = vector("i"); - auto pvector2 = &(vector2.scalar_multiplication("i", 2.0, vi)); + SECTION("permute_assignment") { + buffer_type output; + auto mij = eigen_matrix("i,j"); + output.permute_assignment("j,i", mij); - auto corr = testing::eigen_vector(2); - corr.value()(0) = 20.0; - corr.value()(1) = 40.0; + auto corr = testing::eigen_matrix(3, 2); + corr->at(0, 0) = 10.0; + corr->at(0, 1) = 40.0; + corr->at(1, 0) = 20.0; + corr->at(1, 1) = 50.0; + corr->at(2, 0) = 30.0; + corr->at(2, 1) = 60.0; - REQUIRE(pvector2 == &vector2); - REQUIRE(vector2 == corr); + REQUIRE(output.are_equal(*corr)); } - SECTION("matrix : no permutation") { - auto matrix2 = testing::eigen_matrix(); + SECTION("scalar_multiplication") { + buffer_type output; + auto vi = eigen_vector("i"); + output.scalar_multiplication("i", 2.0, vi); - auto mij = matrix("i,j"); - auto p = &(matrix2.scalar_multiplication("i,j", 2.0, mij)); + auto corr = testing::eigen_vector(2); + corr->at(0) = 20.0; + corr->at(1) = 40.0; - auto corr = testing::eigen_matrix(2, 3); - corr.value()(0, 0) = 20.0; - corr.value()(0, 1) = 40.0; - corr.value()(0, 2) = 60.0; - corr.value()(1, 0) = 80.0; - corr.value()(1, 1) = 100.0; - corr.value()(1, 2) = 120.0; - - REQUIRE(p == &matrix2); - REQUIRE(matrix2 == corr); + REQUIRE(output.are_equal(*corr)); } - SECTION("matrix: permutation") { - auto matrix2 = testing::eigen_matrix(); - auto mij = matrix("i,j"); - auto p = &(matrix2.scalar_multiplication("j,i", 2.0, mij)); - - auto corr = testing::eigen_matrix(3, 2); - corr.value()(0, 0) = 20.0; - corr.value()(1, 0) = 40.0; - corr.value()(2, 0) = 60.0; - corr.value()(0, 1) = 80.0; - corr.value()(1, 1) = 100.0; - corr.value()(2, 1) = 120.0; - REQUIRE(p == &matrix2); - compare_eigen(corr.value(), matrix2.value()); + SECTION("data()") { + REQUIRE(defaulted.data() == nullptr); + REQUIRE(*eigen_scalar.data() == TestType{10.0}); + REQUIRE(*eigen_matrix.data() == TestType{10.0}); } - } - - SECTION("hadamard_") { - SECTION("scalar") { - scalar_buffer scalar2(eigen_scalar, scalar_layout); - scalar2.value()() = 42.0; - - auto s = scalar(""); - auto pscalar2 = &(scalar2.multiplication_assignment("", s, s)); - scalar_buffer scalar_corr(eigen_scalar, scalar_layout); - scalar_corr.value()() = 100.0; - REQUIRE(pscalar2 == &scalar2); - REQUIRE(scalar2 == scalar_corr); + SECTION("data() const") { + REQUIRE(std::as_const(defaulted).data() == nullptr); + REQUIRE(*std::as_const(eigen_scalar).data() == TestType{10.0}); + REQUIRE(*std::as_const(eigen_matrix).data() == TestType{10.0}); } - SECTION("vector") { - auto vector2 = testing::eigen_vector(); - - auto vi = vector("i"); - auto pvector2 = &(vector2.multiplication_assignment("i", vi, vi)); - - vector_buffer 
-            vector_corr.value()(0) = 100.0;
-            vector_corr.value()(1) = 400.0;
-
-            REQUIRE(pvector2 == &vector2);
-            REQUIRE(vector2 == vector_corr);
+        SECTION("get_elem_()") {
+            REQUIRE(eigen_scalar.at() == TestType{10.0});
+            REQUIRE(eigen_vector.at(0) == TestType{10.0});
+            REQUIRE(eigen_matrix.at(0, 0) == TestType{10.0});
         }
 
-        SECTION("matrix : no permutation") {
-            auto matrix2 = testing::eigen_matrix<TestType>();
-
-            auto mij = matrix("i,j");
-            auto pmatrix2 =
-              &(matrix2.multiplication_assignment("i,j", mij, mij));
-
-            matrix_buffer matrix_corr(eigen_matrix, matrix_layout);
-
-            matrix_corr.value()(0, 0) = 100.0;
-            matrix_corr.value()(0, 1) = 400.0;
-            matrix_corr.value()(0, 2) = 900.0;
-            matrix_corr.value()(1, 0) = 1600.0;
-            matrix_corr.value()(1, 1) = 2500.0;
-            matrix_corr.value()(1, 2) = 3600.0;
-
-            REQUIRE(pmatrix2 == &matrix2);
-            REQUIRE(matrix2 == matrix_corr);
-        }
-
-        SECTION("matrix: permutations") {
-            auto matrix2 = testing::eigen_matrix<TestType>();
-            auto l = testing::matrix_physical(3, 2);
-            std::array<int, 2> p10{1, 0};
-            auto eigen_matrix_t = eigen_matrix.shuffle(p10);
-            matrix_buffer matrix1(eigen_matrix_t, l);
-
-            auto mij = matrix("i,j");
-            auto mji = matrix1("j,i");
-
-            matrix_buffer matrix_corr(eigen_matrix, matrix_layout);
-
-            matrix_corr.value()(0, 0) = 100.0;
-            matrix_corr.value()(0, 1) = 400.0;
-            matrix_corr.value()(0, 2) = 900.0;
-            matrix_corr.value()(1, 0) = 1600.0;
-            matrix_corr.value()(1, 1) = 2500.0;
-            matrix_corr.value()(1, 2) = 3600.0;
-
-            SECTION("permute this") {
-                matrix2.multiplication_assignment("j,i", mij, mij);
-
-                matrix_buffer corr(eigen_matrix_t, l);
-                corr.value()(0, 0) = 100.0;
-                corr.value()(0, 1) = 1600.0;
-                corr.value()(1, 0) = 400.0;
-                corr.value()(1, 1) = 2500.0;
-                corr.value()(2, 0) = 900.0;
-                corr.value()(2, 1) = 3600.0;
-
-                REQUIRE(matrix2 == corr);
-            }
-
-            SECTION("permute LHS") {
-                matrix2.multiplication_assignment("i,j", mji, mij);
-                REQUIRE(matrix2 == matrix_corr);
-            }
-
-            SECTION("permute RHS") {
-                matrix2.multiplication_assignment("i,j", mij, mji);
-                REQUIRE(matrix2 == matrix_corr);
-            }
-        }
-
-        SECTION("tensor (must permute all)") {
-            auto tensor2 = testing::eigen_tensor3<TestType>();
-
-            std::array<int, 3> p102{1, 0, 2};
-            auto l102 = testing::tensor3_physical(2, 1, 3);
-            tensor_buffer tensor102(eigen_tensor.shuffle(p102), l102);
-
-            auto tijk = tensor("i,j,k");
-            auto tjik = tensor102("j,i,k");
-
-            tensor2.multiplication_assignment("k,j,i", tijk, tjik);
-
-            std::array<int, 3> p210{2, 1, 0};
-            auto l210 = testing::tensor3_physical(3, 2, 1);
-            tensor_buffer corr(eigen_tensor.shuffle(p210), l210);
-            corr.value()(0, 0, 0) = 100.0;
-            corr.value()(0, 1, 0) = 1600.0;
-            corr.value()(1, 0, 0) = 400.0;
-            corr.value()(1, 1, 0) = 2500.0;
-            corr.value()(2, 0, 0) = 900.0;
-            corr.value()(2, 1, 0) = 3600.0;
-            REQUIRE(tensor2 == corr);
-        }
-    }
-
-    SECTION("contraction_") {
-        auto vi = vector("i");
-        auto mij = matrix("i,j");
-        auto mik = matrix("i,k");
-        auto mjk = matrix("j,k");
-
-        SECTION("vector with vector") {
-            auto p = &(scalar.multiplication_assignment("", vi, vi));
-
-            auto scalar_corr = testing::eigen_scalar<TestType>();
-            scalar_corr.value()() = 500.0; // 10*10 + 20*20
-            REQUIRE(p == &scalar);
-            compare_eigen(scalar_corr.value(), scalar.value());
-        }
-
-        SECTION("ij,ij->") {
-            auto p = &(scalar.multiplication_assignment("", mij, mij));
-
-            auto scalar_corr = testing::eigen_scalar<TestType>();
-            scalar_corr.value()() = 9100.0; // 1400 + 7700
-            REQUIRE(p == &scalar);
-            compare_eigen(scalar_corr.value(), scalar.value());
-        }
-
-        SECTION("ki,kj->ij") {
-            auto buffer2 = testing::eigen_matrix<TestType>();
-            auto p = &(buffer2.multiplication_assignment("i,j", mik, mjk));
-
-            auto matrix_corr = testing::eigen_matrix<TestType>(2, 2);
-            matrix_corr.value()(0, 0) = 1400.0; // 100 + 400 + 900
-            matrix_corr.value()(0, 1) = 3200.0; // 400 + 1000 + 1800
-            matrix_corr.value()(1, 0) = 3200.0; // 400 + 1000 + 1800
-            matrix_corr.value()(1, 1) = 7700.0; // 1600 + 2500 + 3600
-
-            REQUIRE(p == &buffer2);
-            compare_eigen(matrix_corr.value(), buffer2.value());
-        }
-
-        SECTION("ij,i->j") {
-            auto buffer1 = testing::eigen_vector<TestType>();
-            auto p = &(buffer1.multiplication_assignment("j", mij, vi));
-
-            auto vector_corr = testing::eigen_vector<TestType>(3);
-            vector_corr.value()(0) = 900.0;  // 10(10) + 20(40)
-            vector_corr.value()(1) = 1200.0; // 10(20) + 20(50)
-            vector_corr.value()(2) = 1500.0; // 10(30) + 20(60)
-            REQUIRE(p == &buffer1);
-            compare_eigen(vector_corr.value(), buffer1.value());
+        SECTION("get_elem_() const") {
+            REQUIRE(std::as_const(eigen_scalar).at() == TestType{10.0});
+            REQUIRE(std::as_const(eigen_vector).at(0) == TestType{10.0});
+            REQUIRE(std::as_const(eigen_matrix).at(0, 0) == TestType{10.0});
         }
     }
 }
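The hunk above captures the shape of the migrated buffer tests: fixtures come back from the testing helpers as owning (unique_ptr-like) handles, elements are touched through at(...), and comparisons go through the polymorphic are_equal instead of operator== on a concrete buffer::Eigen. A minimal sketch of the new idiom, using only names that appear in this diff; buffer_type stands for whatever the test file typedefs it to, and its default-constructibility is assumed from the tests above:

    // Sketch of the migrated test idiom; not part of the diff itself.
    auto pvec  = testing::eigen_vector<double>(2); // helper fills at(i) = i
    auto pcorr = testing::eigen_vector<double>(2);
    pcorr->at(0) = 0.0; // 0 + 0
    pcorr->at(1) = 2.0; // 1 + 1

    buffer_type output;                      // result buffer, default-constructed
    auto vi = (*pvec)("i");                  // label the operand
    output.addition_assignment("i", vi, vi); // elementwise v + v
    REQUIRE(output.are_equal(*pcorr));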
diff --git a/tests/cxx/unit_tests/tensorwrapper/buffer/eigen_contraction.cpp b/tests/cxx/unit_tests/tensorwrapper/buffer/eigen_contraction.cpp
deleted file mode 100644
index eef3f23a..00000000
--- a/tests/cxx/unit_tests/tensorwrapper/buffer/eigen_contraction.cpp
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- * Copyright 2025 NWChemEx-Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "../testing/testing.hpp"
-#include
-#include
-
-using namespace tensorwrapper;
-using namespace buffer;
-
-#ifdef ENABLE_SIGMA
-using types2test = std::tuple<float, double, sigma::UFloat, sigma::UDouble>;
-#else
-using types2test = std::tuple<float, double>;
-#endif
-
-TEMPLATE_LIST_TEST_CASE("eigen_contraction", "", types2test) {
-    using float_t    = TestType;
-    using label_type = typename BufferBase::label_type;
-
-    // Inputs
-    auto scalar  = testing::eigen_scalar<float_t>();
-    auto vector  = testing::eigen_vector<float_t>();
-    auto vector2 = testing::eigen_vector<float_t>(2);
-    auto matrix  = testing::eigen_matrix<float_t>();
-
-    auto scalar_corr = testing::eigen_scalar<float_t>();
-    scalar_corr.value()() = 30.0;
-
-    auto vector_corr = testing::eigen_vector<float_t>(2);
-    vector_corr.value()(0) = 3.0;
-    vector_corr.value()(1) = 4.0;
-
-    auto matrix_corr = testing::eigen_matrix<float_t>(2, 2);
-    matrix_corr.value()(0, 0) = 10.0;
-    matrix_corr.value()(0, 1) = 14.0;
-    matrix_corr.value()(1, 0) = 14.0;
-    matrix_corr.value()(1, 1) = 20.0;
-
-    label_type l("");
-    label_type j("j");
-    label_type ij("i,j");
-
-    auto mij = matrix("i,j");
-    SECTION("vector with vector") {
-        auto vi  = vector("i");
-        auto& rv = eigen_contraction(scalar, l, vi, vi);
-        REQUIRE(&rv == static_cast<BufferBase*>(&scalar));
-        REQUIRE(scalar_corr.are_equal(scalar));
-    }
-
-    SECTION("ij,ij->") {
-        auto& rv = eigen_contraction(scalar, l, mij, mij);
-        REQUIRE(&rv == static_cast<BufferBase*>(&scalar));
-        REQUIRE(scalar_corr.are_equal(scalar));
-    }
-
-    SECTION("ki,kj->ij") {
-        auto mki    = matrix("k,i");
-        auto mkj    = matrix("k,j");
-        auto buffer = testing::eigen_matrix<float_t>();
-        auto& rv    = eigen_contraction(buffer, ij, mki, mkj);
-        REQUIRE(&rv == static_cast<BufferBase*>(&buffer));
-        REQUIRE(matrix_corr.are_equal(buffer));
-    }
-
-    SECTION("ij,i->j") {
-        auto vi     = vector2("i");
-        auto buffer = testing::eigen_vector<float_t>(2);
-        auto& rv    = eigen_contraction(buffer, j, mij, vi);
-        REQUIRE(&rv == static_cast<BufferBase*>(&buffer));
-        REQUIRE(vector_corr.are_equal(rv));
-    }
-
-    SECTION("ki,jki->j") {
-        auto tensor  = testing::eigen_tensor3<float_t>(2);
-        auto matrix2 = testing::eigen_matrix<float_t>(2);
-        auto buffer  = testing::eigen_vector<float_t>(2);
-        auto corr    = testing::eigen_vector<float_t>(2);
-        corr.value()(0) = 30;
-        corr.value()(1) = 70;
-
-        auto tjki = tensor("j,k,i");
-        auto mki  = matrix2("k,i");
-        auto& rv  = eigen_contraction(buffer, j, mki, tjki);
-        REQUIRE(&rv == static_cast<BufferBase*>(&buffer));
-        REQUIRE(corr.are_equal(rv));
-    }
-
-    SECTION("ki,jkl->jil") {
-        auto tensor  = testing::eigen_tensor3<float_t>(2);
-        auto matrix2 = testing::eigen_matrix<float_t>(2);
-        auto buffer  = testing::eigen_tensor3<float_t>(2);
-        auto corr    = testing::eigen_tensor3<float_t>();
-        corr.value()(0, 0, 0) = 10;
-        corr.value()(0, 0, 1) = 14;
-        corr.value()(0, 1, 0) = 14;
-        corr.value()(0, 1, 1) = 20;
-
-        corr.value()(1, 0, 0) = 26;
-        corr.value()(1, 0, 1) = 30;
-        corr.value()(1, 1, 0) = 38;
-        corr.value()(1, 1, 1) = 44;
-
-        auto tjki = tensor("j,k,l");
-        auto mki  = matrix2("k,i");
-        label_type jil("j,i,l");
-        auto& rv = eigen_contraction(buffer, jil, mki, tjki);
-        REQUIRE(&rv == static_cast<BufferBase*>(&buffer));
-        REQUIRE(corr.are_equal(rv));
-    }
-
-    SECTION("kl,ijkl->ij") {
-        auto tensor  = testing::eigen_tensor4<float_t>();
-        auto matrix2 = testing::eigen_matrix<float_t>(2);
-        auto buffer  = testing::eigen_matrix<float_t>(2);
-        auto corr    = testing::eigen_matrix<float_t>();
-        corr.value()(0, 0) = 30;
-        corr.value()(0, 1) = 70;
-        corr.value()(1, 0) = 110;
-        corr.value()(1, 1) = 150;
-
-        auto lt = tensor("i,j,k,l");
-        auto lm = matrix2("k,l");
-        label_type jil("i,j");
-        auto& rv = eigen_contraction(buffer, ij, lm, lt);
-        REQUIRE(&rv == static_cast<BufferBase*>(&buffer));
-        REQUIRE(corr.are_equal(rv));
-    }
-
-    SECTION("kl,ilkj->ij") {
-        auto tensor  = testing::eigen_tensor4<float_t>();
-        auto matrix2 = testing::eigen_matrix<float_t>(2);
-        auto buffer  = testing::eigen_matrix<float_t>(2);
-        auto corr    = testing::eigen_matrix<float_t>();
-        corr.value()(0, 0) = 48;
-        corr.value()(0, 1) = 58;
-        corr.value()(1, 0) = 128;
-        corr.value()(1, 1) = 138;
-
-        auto lt = tensor("i,l,k,j");
-        auto lm = matrix2("k,l");
-        label_type jil("i,j");
-        auto& rv = eigen_contraction(buffer, ij, lm, lt);
-        REQUIRE(&rv == static_cast<BufferBase*>(&buffer));
-        REQUIRE(corr.are_equal(rv));
-    }
-}
\ No newline at end of file
diff --git a/tests/cxx/unit_tests/tensorwrapper/dsl/dsl.cpp b/tests/cxx/unit_tests/tensorwrapper/dsl/dsl.cpp
index b70d2b4f..d45fbcbd 100644
--- a/tests/cxx/unit_tests/tensorwrapper/dsl/dsl.cpp
+++ b/tests/cxx/unit_tests/tensorwrapper/dsl/dsl.cpp
@@ -85,14 +85,18 @@ TEMPLATE_LIST_TEST_CASE("DSL", "", testing::dsl_types) {
 // Since Eigen buffers are templated on the rank there isn't an easy way to
 // include them in dsl_types
 TEST_CASE("DSLr : buffer::Eigen") {
-    auto scalar0 = testing::eigen_scalar<double>();
-    auto scalar1 = testing::eigen_scalar<double>();
-    auto scalar2 = testing::eigen_scalar<double>();
-    auto corr    = testing::eigen_scalar<double>();
-
-    scalar0.value()() = 1.0;
-    scalar1.value()() = 2.0;
-    scalar2.value()() = 3.0;
+    auto pscalar0 = testing::eigen_scalar<double>();
+    auto pscalar1 = testing::eigen_scalar<double>();
+    auto pscalar2 = testing::eigen_scalar<double>();
+    auto pcorr    = testing::eigen_scalar<double>();
+    auto& scalar0 = *pscalar0;
+    auto& scalar1 = *pscalar1;
+    auto& scalar2 = *pscalar2;
+    auto& corr    = *pcorr;
+
+    scalar0.at() = 1.0;
+    scalar1.at() = 2.0;
+    scalar2.at() = 3.0;
 
     SECTION("assignment") {
         SECTION("scalar") {
diff --git a/tests/cxx/unit_tests/tensorwrapper/dsl/pairwise_parser.cpp b/tests/cxx/unit_tests/tensorwrapper/dsl/pairwise_parser.cpp
index 746b8f33..65151be2 100644
--- a/tests/cxx/unit_tests/tensorwrapper/dsl/pairwise_parser.cpp
+++ b/tests/cxx/unit_tests/tensorwrapper/dsl/pairwise_parser.cpp
@@ -124,14 +124,19 @@ TEMPLATE_LIST_TEST_CASE("PairwiseParser", "", testing::dsl_types) {
 // Since Eigen buffers are templated on the rank there isn't an easy way to
 // include them in dsl_types
 TEST_CASE("PairwiseParser : buffer::Eigen") {
-    auto scalar0 = testing::eigen_scalar<double>();
-    auto scalar1 = testing::eigen_scalar<double>();
-    auto scalar2 = testing::eigen_scalar<double>();
-    auto corr    = testing::eigen_scalar<double>();
-
-    scalar0.value()() = 1.0;
-    scalar1.value()() = 2.0;
-    scalar2.value()() = 3.0;
+    auto pscalar0 = testing::eigen_scalar<double>();
+    auto pscalar1 = testing::eigen_scalar<double>();
+    auto pscalar2 = testing::eigen_scalar<double>();
+    auto pcorr    = testing::eigen_scalar<double>();
+
+    auto& scalar0 = *pscalar0;
+    auto& scalar1 = *pscalar1;
+    auto& scalar2 = *pscalar2;
+    auto& corr    = *pcorr;
+
+    scalar0.at() = 1.0;
+    scalar1.at() = 2.0;
+    scalar2.at() = 3.0;
 
     dsl::PairwiseParser p;
diff --git a/tests/cxx/unit_tests/tensorwrapper/operations/approximately_equal.cpp b/tests/cxx/unit_tests/tensorwrapper/operations/approximately_equal.cpp
new file mode 100644
index 00000000..04d6671b
--- /dev/null
+++ b/tests/cxx/unit_tests/tensorwrapper/operations/approximately_equal.cpp
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2025 NWChemEx-Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "../testing/testing.hpp"
+
+using namespace tensorwrapper;
+using namespace operations;
+
+TEST_CASE("approximately_equal") {
+    Tensor scalar(42.0);
+    Tensor vector{1.23, 2.34};
+
+    SECTION("different ranks") {
+        REQUIRE_FALSE(approximately_equal(scalar, vector));
+    }
+
+    SECTION("Same values") {
+        REQUIRE(approximately_equal(scalar, Tensor(42.0)));
+        REQUIRE(approximately_equal(vector, Tensor{1.23, 2.34}));
+    }
+
+    SECTION("Differ by default tolerance") {
+        double value = 1e-16;
+        REQUIRE_FALSE(approximately_equal(Tensor(0.0), Tensor(value)));
+        REQUIRE_FALSE(
+          approximately_equal(Tensor{1.23, 0.0}, Tensor{1.23, value}));
+        REQUIRE_FALSE(
+          approximately_equal(Tensor{0.0, 2.34}, Tensor{value, 2.34}));
+    }
+
+    SECTION("Differ by more than default tolerance") {
+        double value = 1e-16;
+        REQUIRE_FALSE(approximately_equal(scalar, Tensor(value)));
+        REQUIRE_FALSE(approximately_equal(vector, Tensor{1.23, value}));
+        REQUIRE_FALSE(approximately_equal(vector, Tensor{value, 2.34}));
+    }
+
+    SECTION("Differ by less than default tolerance") {
+        double value = 1e-17;
+        REQUIRE(approximately_equal(Tensor(0.0), Tensor(value)));
+        REQUIRE(approximately_equal(Tensor{1.23, 0.0}, Tensor{1.23, value}));
+        REQUIRE(approximately_equal(Tensor{0.0, 2.34}, Tensor{value, 2.34}));
+    }
+
+    SECTION("Differ by provided tolerance") {
+        double value = 1e-1;
+        REQUIRE_FALSE(approximately_equal(Tensor(0.0), Tensor(value), value));
+        REQUIRE_FALSE(
+          approximately_equal(Tensor{1.23, 0.0}, Tensor{1.23, value}, value));
+        REQUIRE_FALSE(
+          approximately_equal(Tensor{0.0, 2.34}, Tensor{value, 2.34}, value));
+    }
+
+    SECTION("Differ by more than provided tolerance") {
+        double value = 1e-1;
+        REQUIRE_FALSE(approximately_equal(scalar, Tensor(value), value));
+        REQUIRE_FALSE(approximately_equal(vector, Tensor{1.23, value}, value));
+        REQUIRE_FALSE(approximately_equal(vector, Tensor{value, 2.34}, value));
+    }
+
+    SECTION("Differ by less than provided tolerance") {
+        double value = 1e-2;
+        REQUIRE(approximately_equal(Tensor(0.0), Tensor(value), 1e-1));
+        REQUIRE(
+          approximately_equal(Tensor{1.23, 0.0}, Tensor{1.23, value}, 1e-1));
+        REQUIRE(
+          approximately_equal(Tensor{0.0, 2.34}, Tensor{value, 2.34}, 1e-1));
+    }
+}
\ No newline at end of file
diff --git a/tests/cxx/unit_tests/tensorwrapper/tensor/detail_/tensor_factory.cpp b/tests/cxx/unit_tests/tensorwrapper/tensor/detail_/tensor_factory.cpp
index d4a74d62..fd4e3f63 100644
--- a/tests/cxx/unit_tests/tensorwrapper/tensor/detail_/tensor_factory.cpp
+++ b/tests/cxx/unit_tests/tensorwrapper/tensor/detail_/tensor_factory.cpp
@@ -51,7 +51,7 @@ TEST_CASE("TensorFactory") {
     layout::Physical physical(shape, g, sparsity);
     auto pphysical = physical.clone_as<layout::Physical>();
 
-    allocator::Eigen<double, 2> alloc(rv);
+    allocator::Eigen<double> alloc(rv);
     auto pbuffer = alloc.allocate(std::move(pphysical));
     auto buffer_address = pbuffer.get();
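Beyond adding coverage, the approximately_equal sections above pin down the intended semantics: comparison fails across ranks, is element-wise otherwise, uses a default tolerance that the tests bracket between 1e-17 and 1e-16, and accepts an optional third argument that overrides the default. Restated as a usage sketch, with values taken directly from the test cases:

    // Semantics inferred from the test cases above; not part of the diff.
    Tensor z{0.0, 2.34};
    REQUIRE(approximately_equal(z, Tensor{1e-17, 2.34}));       // under default tol
    REQUIRE_FALSE(approximately_equal(z, Tensor{1e-16, 2.34})); // not under it
    REQUIRE(approximately_equal(z, Tensor{1e-2, 2.34}, 1e-1));  // explicit tol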
diff --git a/tests/cxx/unit_tests/tensorwrapper/tensor/detail_/tensor_input.cpp b/tests/cxx/unit_tests/tensorwrapper/tensor/detail_/tensor_input.cpp
index 9814fde5..d7657add 100644
--- a/tests/cxx/unit_tests/tensorwrapper/tensor/detail_/tensor_input.cpp
+++ b/tests/cxx/unit_tests/tensorwrapper/tensor/detail_/tensor_input.cpp
@@ -36,8 +36,9 @@ TEST_CASE("TensorInput") {
     sparsity::Pattern sparsity(2);
     layout::Logical logical(shape, g, sparsity);
     layout::Physical physical(shape, g, sparsity);
-    allocator::Eigen<double, 0> alloc(rv);
-    buffer::Eigen<double, 0> buffer;
+    allocator::Eigen<double> alloc(rv);
+    auto pbuffer = alloc.construct(42.0);
+    auto& buffer = *pbuffer;
 
     detail_::TensorInput defaulted;
     detail_::TensorInput scalar(shape::Smooth{});
diff --git a/tests/cxx/unit_tests/tensorwrapper/tensor/detail_/tensor_pimpl.cpp b/tests/cxx/unit_tests/tensorwrapper/tensor/detail_/tensor_pimpl.cpp
index 26c45120..0c92de48 100644
--- a/tests/cxx/unit_tests/tensorwrapper/tensor/detail_/tensor_pimpl.cpp
+++ b/tests/cxx/unit_tests/tensorwrapper/tensor/detail_/tensor_pimpl.cpp
@@ -21,7 +21,6 @@
 #include
 
 using namespace tensorwrapper;
-using buffer_type = buffer::Eigen<double, 1>;
 
 TEST_CASE("TensorPIMPL") {
     auto input = testing::smooth_vector_input();
@@ -98,7 +97,6 @@ TEST_CASE("TensorPIMPL") {
         auto other_vector = testing::smooth_vector_alt();
         detail_::TensorPIMPL diff(std::move(plogical2),
                                   std::move(other_vector.m_pbuffer));
-        buffer_type buffer2;
     }
 }
diff --git a/tests/cxx/unit_tests/tensorwrapper/tensor/tensor_class.cpp b/tests/cxx/unit_tests/tensorwrapper/tensor/tensor_class.cpp
index 6e1f0ea5..f6a1ebf0 100644
--- a/tests/cxx/unit_tests/tensorwrapper/tensor/tensor_class.cpp
+++ b/tests/cxx/unit_tests/tensorwrapper/tensor/tensor_class.cpp
@@ -220,6 +220,24 @@ TEST_CASE("Tensor") {
             REQUIRE(prv == &rv);
             REQUIRE(rv == Tensor{0, 1, 4, 9, 16});
         }
+
+        SECTION("ij,jkl->ikl") {
+            Tensor output;
+
+            Tensor matrix(testing::smooth_matrix_<double>());
+            Tensor tensor(testing::smooth_tensor3_<double>());
+
+            auto m = matrix("i,j");
+            auto t = tensor("j,k,l");
+
+            auto poutput = &(output.multiplication_assignment("i,k,l", m, t));
+
+            Tensor corr{{{11.0, 14.0}, {17.0, 20.0}},
+                        {{23.0, 30.0}, {37.0, 44.0}}};
+
+            REQUIRE(poutput == &output);
+            REQUIRE(corr == output);
+        }
     }
 
     SECTION("scalar_multiplication") {
         SECTION("scalar") {
diff --git a/tests/cxx/unit_tests/tensorwrapper/testing/eigen_buffers.hpp b/tests/cxx/unit_tests/tensorwrapper/testing/eigen_buffers.hpp
index 8f6f2977..e3ab472d 100644
--- a/tests/cxx/unit_tests/tensorwrapper/testing/eigen_buffers.hpp
+++ b/tests/cxx/unit_tests/tensorwrapper/testing/eigen_buffers.hpp
@@ -26,87 +26,64 @@
 
 namespace tensorwrapper::testing {
 
-// Typedefs of buffer::Eigen objects with various template parameters
-using ebufferf0 = buffer::Eigen<float, 0>;
-using ebufferf1 = buffer::Eigen<float, 1>;
-using ebufferf2 = buffer::Eigen<float, 2>;
-using ebufferf3 = buffer::Eigen<float, 3>;
-using ebufferd0 = buffer::Eigen<double, 0>;
-using ebufferd1 = buffer::Eigen<double, 1>;
-using ebufferd2 = buffer::Eigen<double, 2>;
-using ebufferd3 = buffer::Eigen<double, 3>;
+template<typename FloatType>
+auto make_allocator() {
+    parallelzone::runtime::RuntimeView rv;
+    return allocator::Eigen<FloatType>(rv);
+}
 
 template<typename FloatType>
 auto eigen_scalar() {
-    using buffer_type = buffer::Eigen<FloatType, 0>;
-    using data_type   = typename buffer_type::data_type;
-    data_type scalar;
-    scalar() = 42.0;
-    shape::Smooth shape{};
-    layout::Physical l(shape);
-    return buffer_type(scalar, l);
+    auto alloc = make_allocator<FloatType>();
+    return alloc.construct(42.0);
 }
 
 template<typename FloatType>
 auto eigen_vector(std::size_t n = 5) {
-    using buffer_type = buffer::Eigen<FloatType, 1>;
-    using data_type   = typename buffer_type::data_type;
-    data_type vector(n);
-    for(std::size_t i = 0; i < n; ++i) vector(i) = i;
-    shape::Smooth shape{n};
-    layout::Physical l(shape);
-    return buffer_type(vector, l);
+    layout::Physical l(shape::Smooth{n});
+    auto alloc  = make_allocator<FloatType>();
+    auto buffer = alloc.allocate(l);
+    for(std::size_t i = 0; i < n; ++i) buffer->at(i) = i;
+    return buffer;
 }
 
 template<typename FloatType>
 auto eigen_matrix(std::size_t n = 2, std::size_t m = 2) {
-    using buffer_type = buffer::Eigen<FloatType, 2>;
-    using data_type   = typename buffer_type::data_type;
-    data_type matrix(n, m);
+    layout::Physical l(shape::Smooth{n, m});
+    auto alloc  = make_allocator<FloatType>();
+    auto buffer = alloc.allocate(l);
     double counter = 1.0;
-    for(std::size_t i = 0; i < n; ++i)
-        for(std::size_t j = 0; j < m; ++j) matrix(i, j) = counter++;
-
-    shape::Smooth shape{n, m};
-    layout::Physical l(shape);
-    return buffer_type(matrix, l);
+    for(decltype(n) i = 0; i < n; ++i)
+        for(decltype(m) j = 0; j < m; ++j) buffer->at(i, j) = counter++;
+    return buffer;
 }
 
 template<typename FloatType>
 auto eigen_tensor3(std::size_t n = 2, std::size_t m = 2, std::size_t l = 2) {
-    using buffer_type = buffer::Eigen<FloatType, 3>;
-    using data_type   = typename buffer_type::data_type;
-    int in = static_cast<int>(n);
-    int im = static_cast<int>(m);
-    int il = static_cast<int>(l);
-    shape::Smooth shape{n, m, l};
-    layout::Physical layout(shape);
-    data_type e_tensor(in, im, il);
+    layout::Physical layout(shape::Smooth{n, m, l});
+    auto alloc  = make_allocator<FloatType>();
+    auto buffer = alloc.allocate(layout);
     double counter = 1.0;
-    for(decltype(in) i = 0; i < in; ++i)
-        for(decltype(im) j = 0; j < im; ++j)
-            for(decltype(il) k = 0; k < il; ++k) e_tensor(i, j, k) = counter++;
-    return buffer_type(e_tensor, layout);
+    for(decltype(n) i = 0; i < n; ++i)
+        for(decltype(m) j = 0; j < m; ++j)
+            for(decltype(l) k = 0; k < l; ++k) buffer->at(i, j, k) = counter++;
+    return buffer;
 }
 
 template<typename FloatType>
 auto eigen_tensor4(std::array<std::size_t, 4> extents = {2, 2, 2, 2}) {
-    auto constexpr Rank = 4;
-    using buffer_type = buffer::Eigen<FloatType, Rank>;
-    using data_type   = typename buffer_type::data_type;
-    std::array<int, Rank> iextents;
-    for(std::size_t i = 0; i < Rank; ++i) iextents[i] = extents[i];
     shape::Smooth shape{extents[0], extents[1], extents[2], extents[3]};
     layout::Physical layout(shape);
-    data_type e_tensor(iextents[0], iextents[1], iextents[2], iextents[3]);
+    auto alloc  = make_allocator<FloatType>();
+    auto buffer = alloc.allocate(layout);
     double counter = 1.0;
-    std::array<int, Rank> i;
-    for(i[0] = 0; i[0] < iextents[0]; ++i[0])
-        for(i[1] = 0; i[1] < iextents[1]; ++i[1])
-            for(i[2] = 0; i[2] < iextents[2]; ++i[2])
-                for(i[3] = 0; i[3] < iextents[3]; ++i[3])
-                    e_tensor(i[0], i[1], i[2], i[3]) = counter++;
-    return buffer_type(e_tensor, layout);
+    decltype(extents) i;
+    for(i[0] = 0; i[0] < extents[0]; ++i[0])
+        for(i[1] = 0; i[1] < extents[1]; ++i[1])
+            for(i[2] = 0; i[2] < extents[2]; ++i[2])
+                for(i[3] = 0; i[3] < extents[3]; ++i[3])
+                    buffer->at(i[0], i[1], i[2], i[3]) = counter++;
+    return buffer;
 }
 
 } // namespace tensorwrapper::testing
\ No newline at end of file
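With eigen_buffers.hpp rewritten this way, every helper funnels buffer creation through make_allocator, so the rank-specific buffer::Eigen typedefs become unnecessary and callers receive an owning handle they can fill or overwrite. A sketch of how downstream test code consumes the helpers, assuming only the signatures defined in this file:

    // Sketch; mirrors how inputs.hpp uses the helpers after this change.
    auto pmatrix = tensorwrapper::testing::eigen_matrix<double>(2, 3);
    // The helper pre-fills 1.0, 2.0, ..., 6.0 in row-major order.
    pmatrix->at(1, 2) = -6.0;                                   // overwrite one element
    auto pvec = tensorwrapper::testing::eigen_vector<float>(4); // at(i) == i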
diff --git a/tests/cxx/unit_tests/tensorwrapper/testing/inputs.hpp b/tests/cxx/unit_tests/tensorwrapper/testing/inputs.hpp
index ae3151b8..073ea32c 100644
--- a/tests/cxx/unit_tests/tensorwrapper/testing/inputs.hpp
+++ b/tests/cxx/unit_tests/tensorwrapper/testing/inputs.hpp
@@ -47,13 +47,9 @@ inline auto smooth_vector_input() { return smooth_vector_(); }
 
 /// 5 element vector internally stored as a 5 by 1 matrix
 inline auto smooth_vector_alt() {
-    using buffer_type = buffer::Eigen<double, 2>;
-    using data_type   = typename buffer_type::data_type;
+    auto pbuffer = eigen_matrix<double>(5, 1);
     shape::Smooth shape{5};
-    layout::Physical l(shape::Smooth{5, 1});
-    data_type matrix(5, 1);
-    for(std::size_t i = 0; i < 5; ++i) matrix(i, 0) = i;
-    return detail_::TensorInput(shape, buffer_type(matrix, l));
+    return detail_::TensorInput(shape, std::move(pbuffer));
 }
 
 template<typename FloatType>
@@ -66,23 +62,21 @@ inline auto smooth_matrix_() {
 inline auto smooth_matrix_input() { return smooth_matrix_<double>(); }
 
 inline auto smooth_symmetric_matrix_input() {
-    using buffer_type = buffer::Eigen<double, 2>;
-    using data_type   = typename buffer_type::data_type;
+    auto pmatrix = eigen_matrix<double>(3, 3);
+    pmatrix->at(0, 0) = 1.0;
+    pmatrix->at(0, 1) = 2.0;
+    pmatrix->at(0, 2) = 3.0;
+    pmatrix->at(1, 0) = 2.0;
+    pmatrix->at(1, 1) = 4.0;
+    pmatrix->at(1, 2) = 5.0;
+    pmatrix->at(2, 0) = 3.0;
+    pmatrix->at(2, 1) = 5.0;
+    pmatrix->at(2, 2) = 6.0;
     shape::Smooth shape{3, 3};
-    layout::Physical l(shape);
-    data_type matrix(3, 3);
-    matrix(0, 0) = 1.0;
-    matrix(0, 1) = 2.0;
-    matrix(0, 2) = 3.0;
-    matrix(1, 0) = 2.0;
-    matrix(1, 1) = 4.0;
-    matrix(1, 2) = 5.0;
-    matrix(2, 0) = 3.0;
-    matrix(2, 1) = 5.0;
-    matrix(2, 2) = 6.0;
     symmetry::Permutation p01{0, 1};
     symmetry::Group g(p01);
-    return detail_::TensorInput(shape, g, buffer_type(matrix, l));
+
+    return detail_::TensorInput(shape, g, std::move(pmatrix));
 }
 
 template<typename FloatType>
@@ -95,28 +89,9 @@ inline auto smooth_tensor3_() {
 inline auto smooth_tensor3_input() { return smooth_tensor3_<double>(); }
 
 inline auto smooth_tensor4_input() {
-    using buffer_type = buffer::Eigen<double, 4>;
-    using data_type   = typename buffer_type::data_type;
     shape::Smooth shape{2, 2, 2, 2};
-    layout::Physical l(shape);
-    data_type tensor(2, 2, 2, 2);
-    tensor(0, 0, 0, 0) = 1.0;
-    tensor(0, 0, 0, 1) = 2.0;
-    tensor(0, 0, 1, 0) = 3.0;
-    tensor(0, 0, 1, 1) = 4.0;
-    tensor(0, 1, 0, 0) = 5.0;
-    tensor(0, 1, 0, 1) = 6.0;
-    tensor(0, 1, 1, 0) = 7.0;
-    tensor(0, 1, 1, 1) = 8.0;
-    tensor(1, 0, 0, 0) = 9.0;
-    tensor(1, 0, 0, 1) = 10.0;
-    tensor(1, 0, 1, 0) = 11.0;
-    tensor(1, 0, 1, 1) = 12.0;
-    tensor(1, 1, 0, 0) = 13.0;
-    tensor(1, 1, 0, 1) = 14.0;
-    tensor(1, 1, 1, 0) = 15.0;
-    tensor(1, 1, 1, 1) = 16.0;
-    return detail_::TensorInput(shape, buffer_type(tensor, l));
+    auto pbuffer = eigen_tensor4<double>();
+    return detail_::TensorInput(shape, std::move(pbuffer));
 }
 
 } // namespace tensorwrapper::testing
diff --git a/tests/cxx/unit_tests/tensorwrapper/testing/testing.hpp b/tests/cxx/unit_tests/tensorwrapper/testing/testing.hpp
index 35bdee71..b3dd57b6 100644
--- a/tests/cxx/unit_tests/tensorwrapper/testing/testing.hpp
+++ b/tests/cxx/unit_tests/tensorwrapper/testing/testing.hpp
@@ -22,13 +22,4 @@
 #include "layouts.hpp"
 #include "shapes.hpp"
 
-namespace tensorwrapper::testing {
-
-#ifdef ENABLE_SIGMA
-using floating_point_types =
-  std::tuple<float, double, sigma::UFloat, sigma::UDouble>;
-#else
-using floating_point_types = std::tuple<float, double>;
-#endif
-
-} // namespace tensorwrapper::testing
\ No newline at end of file
+namespace tensorwrapper::testing {} // namespace tensorwrapper::testing
\ No newline at end of file
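Taken together, the test-side changes in this diff lean on two allocator entry points: allocate(layout) for a shape-only allocation the caller then fills, and construct(value) for a value-initialized buffer, as in the tensor_input.cpp hunk. A hedged sketch of that flow; the single-parameter allocator::Eigen<double> spelling is assumed from its use elsewhere in this diff:

    // Sketch; exact allocator template parameters are an assumption.
    parallelzone::runtime::RuntimeView rv;
    tensorwrapper::allocator::Eigen<double> alloc(rv);

    tensorwrapper::layout::Physical l(tensorwrapper::shape::Smooth{2, 2});
    auto pbuffer = alloc.allocate(l); // contents then set by the caller
    pbuffer->at(0, 0) = 42.0;

    auto pscalar = alloc.construct(42.0); // value-initialized, per tensor_input.cpp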