diff --git a/include/tensorwrapper/allocator/allocator_base.hpp b/include/tensorwrapper/allocator/allocator_base.hpp index e3734d52..ceeddd40 100644 --- a/include/tensorwrapper/allocator/allocator_base.hpp +++ b/include/tensorwrapper/allocator/allocator_base.hpp @@ -54,6 +54,12 @@ class AllocatorBase : public detail_::PolymorphicBase { /// Type all buffers derive from using buffer_base_type = buffer::BufferBase; + /// Type of a mutable reference to an object of type buffer_base_type + using buffer_base_reference = buffer_base_type&; + + /// Type of a read-only reference to an object of type buffer_base_type + using const_buffer_base_reference = const buffer_base_type&; + /// Type of a pointer to an object of type buffer_base_type using buffer_base_pointer = typename buffer_base_type::buffer_base_pointer; diff --git a/include/tensorwrapper/allocator/eigen.hpp b/include/tensorwrapper/allocator/eigen.hpp index 9d655ea1..051046e6 100644 --- a/include/tensorwrapper/allocator/eigen.hpp +++ b/include/tensorwrapper/allocator/eigen.hpp @@ -16,7 +16,7 @@ #pragma once #include -#include +#include namespace tensorwrapper::allocator { @@ -42,13 +42,21 @@ class Eigen : public Replicated { // Pull in base class's types using my_base_type::base_pointer; using my_base_type::buffer_base_pointer; + using my_base_type::buffer_base_reference; using my_base_type::const_base_reference; + using my_base_type::const_buffer_base_reference; using my_base_type::layout_pointer; using my_base_type::runtime_view_type; /// Type of a buffer containing an Eigen tensor using eigen_buffer_type = buffer::Eigen; + /// Type of a mutable reference to an object of type eigen_buffer_type + using eigen_buffer_reference = eigen_buffer_type&; + + /// Type of a read-only reference to an object of type eigen_buffer_type + using const_eigen_buffer_reference = const eigen_buffer_type&; + /// Type of a pointer to an eigen_buffer_type object using eigen_buffer_pointer = std::unique_ptr; @@ -140,6 +148,53 @@ class 
Eigen : public Replicated { return pbuffer; } + /** @brief Determines if @p buffer can be rebound as an Eigen buffer. + * + * Rebinding a buffer allows the same memory to be viewed as a (possibly) + * different type of buffer. + * + * @param[in] buffer The tensor we are attempting to rebind. + * + * @return True if @p buffer can be rebound to the type of buffer + * associated with this allocator and false otherwise. + * + * @throw None No throw guarantee + */ + static bool can_rebind(const_buffer_base_reference buffer); + + /** @brief Rebinds a buffer to the same type as *this. + * + * This method will convert @p buffer into a buffer which could have been + * allocated by *this. If @p buffer was allocated as such a buffer already, + * then this method is simply a downcast. + * + * @param[in] buffer The buffer to rebind. + * + * @return A mutable reference to @p buffer viewed as a buffer that could + * have been allocated by *this. + * + * @throw std::runtime_error if can_rebind(buffer) is false. Strong throw + * guarantee. + */ + static eigen_buffer_reference rebind(buffer_base_reference buffer); + + /** @brief Rebinds a buffer to the same type as *this. + * + * This method is the same as the non-const version except that the result + * is read-only. See the description for the non-const version for more + * details. + * + * @param[in] buffer The buffer to rebind. + * + * @return A read-only reference to @p buffer viewed as if it was + * allocated by *this. + * + * @throw std::runtime_error if can_rebind(buffer) is false. Strong throw + * guarantee. + */ + static const_eigen_buffer_reference rebind( + const_buffer_base_reference buffer); + /** @brief Is *this value equal to @p rhs? * * @tparam FloatType2 The numerical type @p rhs uses for its elements. 
diff --git a/include/tensorwrapper/buffer/buffer_base.hpp b/include/tensorwrapper/buffer/buffer_base.hpp index 9e85c820..52f812ab 100644 --- a/include/tensorwrapper/buffer/buffer_base.hpp +++ b/include/tensorwrapper/buffer/buffer_base.hpp @@ -16,7 +16,9 @@ #pragma once #include +#include #include + namespace tensorwrapper::buffer { /** @brief Common base class for all buffer objects. @@ -35,6 +37,9 @@ class BufferBase : public detail_::PolymorphicBase { /// Type all buffers inherit from using buffer_base_type = typename my_base_type::base_type; + /// Type of a mutable reference to a buffer_base_type object + using buffer_base_reference = typename my_base_type::base_reference; + /// Type of a read-only reference to a buffer_base_type object using const_buffer_base_reference = typename my_base_type::const_base_reference; @@ -42,6 +47,9 @@ class BufferBase : public detail_::PolymorphicBase { /// Type of a pointer to an object of type buffer_base_type using buffer_base_pointer = typename my_base_type::base_pointer; + /// Type of a pointer to a read-only object of type buffer_base_type + using const_buffer_base_pointer = typename my_base_type::const_base_pointer; + /// Type of the class describing the physical layout of the buffer using layout_type = layout::LayoutBase; @@ -51,6 +59,18 @@ class BufferBase : public detail_::PolymorphicBase { /// Type of a pointer to the layout using layout_pointer = typename layout_type::layout_pointer; + /// Type of labels for making a labeled buffer + using label_type = std::string; + + /// Type of a labeled buffer + using labeled_buffer_type = dsl::Labeled; + + /// Type of a labeled read-only buffer (n.b. 
labels are mutable) + using labeled_const_buffer_type = dsl::Labeled; + + /// Type of a read-only reference to a labeled_buffer_type object + using const_labeled_buffer_reference = const labeled_const_buffer_type&; + // ------------------------------------------------------------------------- // -- Accessors // ------------------------------------------------------------------------- @@ -82,10 +102,128 @@ class BufferBase : public detail_::PolymorphicBase { return *m_layout_; } + // ------------------------------------------------------------------------- + // -- BLAS Operations + // ------------------------------------------------------------------------- + + /** @brief Set this to the result of *this + rhs. + * + * This method will overwrite the state of *this with the result of + * adding the original state of *this to that of @p rhs. Depending on the + * value @p this_labels compared to the labels associated with @p rhs, + * it may be a permutation of @p rhs that is added to *this. + * + * @param[in] this_labels The labels to associate with the modes of *this. + * @param[in] rhs The buffer to add into *this. + * + * @throws ??? Throws if the derived class's implementation throws. Same + * throw guarantee. + */ + buffer_base_reference addition_assignment( + label_type this_labels, const_labeled_buffer_reference rhs) { + return addition_assignment_(std::move(this_labels), rhs); + } + + /** @brief Returns the result of *this + rhs. + * + * This method is the same as addition_assignment except that the result + * is returned in a newly allocated buffer instead of overwriting *this. + * + * @param[in] this_labels the labels for the modes of *this. + * @param[in] rhs The buffer to add to *this. + * + * @return The buffer resulting from adding *this to @p rhs. + * + * @throw std::bad_alloc if there is a problem copying *this. Strong throw + * guarantee. + * @throw ??? If addition_assignment throws when adding @p rhs to the + * copy of *this. Same throw guarantee. 
+ */ + buffer_base_pointer addition(label_type this_labels, + const_labeled_buffer_reference rhs) const { + auto pthis = clone(); + pthis->addition_assignment(std::move(this_labels), rhs); + return pthis; + } + + /** @brief Sets *this to a permutation of @p rhs. + * + * `rhs.rhs()` are the dummy indices associated with the modes of the + * buffer in @p rhs and @p this_labels are the dummy indices associated + * with the buffer in *this. This method will permute @p rhs so that the + * resulting buffer's modes are ordered consistently with @p this_labels, + * i.e. the permutation is FROM the `rhs.rhs()` order TO the + * @p this_labels order. This is seemingly backwards when described out, + * but consistent with the intent of a DSL expression like + * `t("i,j") = x("j,i");` where the intent is to set `t` equal to the + * transpose of `x`. + * + * @param[in] this_labels the dummy indices for the modes of *this. + * @param[in] rhs The tensor to permute. + * + * @return *this after setting it equal to a permutation of @p rhs. + * + * @throw ??? If the derived class's implementation of permute_assignment_ + * throws. Same throw guarantee. + */ + buffer_base_reference permute_assignment( + label_type this_labels, const_labeled_buffer_reference rhs) { + return permute_assignment_(std::move(this_labels), rhs); + } + + /** @brief Returns a copy of *this obtained by permuting *this. + * + * This method simply calls permute_assignment on a copy of *this. See the + * description of permute_assignment for more details. + * + * @param[in] this_labels dummy indices representing the modes of *this in + * its current state. + * @param[in] out_labels how the user wants the modes of *this to be + * ordered. + * + * @throw std::bad_alloc if there is a problem allocating the copy. Strong + * throw guarantee. + * @throw ??? If the derived class's implementation of permute_assignment_ + * throws. Same throw guarantee. 
+ */ + buffer_base_pointer permute(label_type this_labels, + label_type out_labels) const { + auto pthis = clone(); + pthis->permute_assignment(std::move(out_labels), (*this)(this_labels)); + return pthis; + } + // ------------------------------------------------------------------------- // -- Utility methods // ------------------------------------------------------------------------- + /** @brief Associates labels with the modes of *this. + * + * This method is used to create a labeled buffer object by pairing *this + * with the provided labels. The resulting object is capable of being + * composed via the DSL. + * + * @param[in] labels The indices to associate with the modes of *this. + * + * @return A DSL term pairing *this with @p labels. + * + * @throw None No throw guarantee. + */ + labeled_buffer_type operator()(label_type labels); + + /** @brief Associates labels with the modes of *this. + * + * This method is the same as the non-const version except that the result + * contains a read-only reference to *this. + * + * @param[in] labels The labels to associate with *this. + * + * @return A DSL term pairing *this with @p labels. + * + * @throw None No throw guarantee. + */ + labeled_const_buffer_type operator()(label_type labels) const; + /** @brief Is *this value equal to @p rhs? 
* * Two BufferBase objects are value equal if the layouts they contain are @@ -183,6 +321,18 @@ class BufferBase : public detail_::PolymorphicBase { return *this; } + /// Derived class should overwrite to implement addition_assignment + virtual buffer_base_reference addition_assignment_( + label_type this_labels, const_labeled_buffer_reference rhs) { + throw std::runtime_error("Addition assignment NYI"); + } + + /// Derived class should overwrite to implement permute_assignment + virtual buffer_base_reference permute_assignment_( + label_type this_labels, const_labeled_buffer_reference rhs) { + throw std::runtime_error("Permute assignment NYI"); + } + private: /// Throws std::runtime_error when there is no layout void assert_layout_() const { diff --git a/include/tensorwrapper/buffer/buffer_fwd.hpp b/include/tensorwrapper/buffer/buffer_fwd.hpp new file mode 100644 index 00000000..51902238 --- /dev/null +++ b/include/tensorwrapper/buffer/buffer_fwd.hpp @@ -0,0 +1,30 @@ +/* + * Copyright 2024 NWChemEx-Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#pragma once + +namespace tensorwrapper::buffer { + +class BufferBase; + +template +class Eigen; + +class Local; + +class Replicated; + +} // namespace tensorwrapper::buffer \ No newline at end of file diff --git a/include/tensorwrapper/buffer/eigen.hpp b/include/tensorwrapper/buffer/eigen.hpp index ef4c0425..4f80a3e0 100644 --- a/include/tensorwrapper/buffer/eigen.hpp +++ b/include/tensorwrapper/buffer/eigen.hpp @@ -39,7 +39,9 @@ class Eigen : public Replicated { /// Pull in base class's types using typename my_base_type::buffer_base_pointer; using typename my_base_type::const_buffer_base_reference; + using typename my_base_type::const_labeled_buffer_reference; using typename my_base_type::const_layout_reference; + using typename my_base_type::label_type; /// Type of a rank @p Rank tensor using floats of type @p FloatType using data_type = eigen::data_type; @@ -180,9 +182,38 @@ class Eigen : public Replicated { return my_base_type::are_equal_impl_(rhs); } + /// Implements addition_assignment by rebinding rhs to an Eigen buffer + buffer_base_reference addition_assignment_( + label_type this_labels, const_labeled_buffer_reference rhs) override; + + /// Implements permute assignment by deferring to Eigen's shuffle command. 
+ buffer_base_reference permute_assignment_( + label_type this_labels, const_labeled_buffer_reference rhs) override; + + /// Implements to_string + typename my_base_type::string_type to_string_() const override; + private: /// The actual Eigen tensor data_type m_tensor_; }; +#define DECLARE_EIGEN_BUFFER(RANK) \ + extern template class Eigen; \ + extern template class Eigen + +DECLARE_EIGEN_BUFFER(0); +DECLARE_EIGEN_BUFFER(1); +DECLARE_EIGEN_BUFFER(2); +DECLARE_EIGEN_BUFFER(3); +DECLARE_EIGEN_BUFFER(4); +DECLARE_EIGEN_BUFFER(5); +DECLARE_EIGEN_BUFFER(6); +DECLARE_EIGEN_BUFFER(7); +DECLARE_EIGEN_BUFFER(8); +DECLARE_EIGEN_BUFFER(9); +DECLARE_EIGEN_BUFFER(10); + +#undef DECLARE_EIGEN_BUFFER + } // namespace tensorwrapper::buffer diff --git a/include/tensorwrapper/detail_/polymorphic_base.hpp b/include/tensorwrapper/detail_/polymorphic_base.hpp index 15e0ae0c..f6a7c152 100644 --- a/include/tensorwrapper/detail_/polymorphic_base.hpp +++ b/include/tensorwrapper/detail_/polymorphic_base.hpp @@ -16,7 +16,9 @@ #pragma once #include +#include #include + namespace tensorwrapper::detail_ { /** @brief Defines the API polymorphic utility methods should use. @@ -42,6 +44,12 @@ class PolymorphicBase { /// Pointer to an object of type base_type using base_pointer = std::unique_ptr; + /// Pointer to a read-only object of type base_type + using const_base_pointer = std::unique_ptr; + + /// Type used for representing *this as a string + using string_type = std::string; + /// @brief Defaulted no-throw polymorphic dtor virtual ~PolymorphicBase() noexcept = default; @@ -129,6 +137,19 @@ class PolymorphicBase { return !are_equal(rhs); } + /** @brief Returns a string representation of *this. + * + * By default a polymorphic object is represented as `"{?}"`. Derived + * classes are encouraged to override `to_string_` to provide more + * helpful representations. + * + * @note This method is meant primarily for logging/debugging and NOT for + * serialization or archival. 
+ * + * @return *this represented as a string. + */ + auto to_string() const { return to_string_(); } + protected: /** @brief No-op default ctor * @@ -200,6 +221,14 @@ class PolymorphicBase { * false otherwise. */ virtual bool are_equal_(const_base_reference rhs) const noexcept = 0; + + virtual string_type to_string_() const { return "{?}"; } }; +/// Implements printing via ostream for objects deriving from PolymorphicBase +template +inline std::ostream& operator<<(std::ostream& os, const PolymorphicBase& b) { + return os << b.to_string(); +} + } // namespace tensorwrapper::detail_ diff --git a/include/tensorwrapper/dsl/dsl.hpp b/include/tensorwrapper/dsl/dsl.hpp new file mode 100644 index 00000000..24375be6 --- /dev/null +++ b/include/tensorwrapper/dsl/dsl.hpp @@ -0,0 +1,20 @@ +/* + * Copyright 2024 NWChemEx-Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once +#include +#include +#include \ No newline at end of file diff --git a/include/tensorwrapper/dsl/dsl_forward.hpp b/include/tensorwrapper/dsl/dsl_forward.hpp new file mode 100644 index 00000000..c756ebd7 --- /dev/null +++ b/include/tensorwrapper/dsl/dsl_forward.hpp @@ -0,0 +1,27 @@ +/* + * Copyright 2024 NWChemEx-Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +namespace tensorwrapper::dsl { + +template +class Labeled; + +template +class PairwiseParser; + +} // namespace tensorwrapper::dsl \ No newline at end of file diff --git a/include/tensorwrapper/dsl/dummy_indices.hpp b/include/tensorwrapper/dsl/dummy_indices.hpp new file mode 100644 index 00000000..73b50808 --- /dev/null +++ b/include/tensorwrapper/dsl/dummy_indices.hpp @@ -0,0 +1,240 @@ +/* + * Copyright 2024 NWChemEx-Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once +#include +#include +#include +#include + +namespace tensorwrapper::dsl { + +/** @brief Class to help deal with string-based dummy indices. + * + * @tparam StringType The type used to hold the dummy indices. + * + * The DSL calls for the user to label each mode of a tensor (or shape, or + * buffer, or...) with a dummy index. 
This is usually done by providing a + * compile time literal like `"i,j,k"`, which would label the modes of a rank + * 3 tensor such that mode 0 is assigned dummy index `"i"`, mode 1 is assigned + * dummy index `"j"`, and mode 2 is assigned dummy index `"k"`. While strings + * are nice for the user, they're less nice for the developer. This class maps + * the string the user provided to an ordered set of objects. The developer + * can then request common dummy index manipulations like set difference or + * permutation and let *this worry about the string manipulations. + * + * This class defines the string to dummy index conventions used throughout the + * TensorWrapper library, namely: + * + * - Dummy indices are separated by commas, i.e., `"i,jk,l"` defines three + * indices such that mode 0 is labeled by `"i"`, mode 1 by `"jk"`, and mode + * 2 by `"l"`. + * - Dummy indices can be multiple characters (see previous example) + * - Dummy indices are case-sensitive, i.e., `"i,J"` and `"i,j"` result in + * different dummy indices for mode 1. + * - Spaces are assumed to be for the user's clarity and are stripped prior + * to splitting, i.e., `"i, j"` and `"i,j"` are the same set of indices. This + * also means `"my index,k"` will define a dummy index `"myindex"` for + * mode 0.
+ */ +template +class DummyIndices + : public utilities::IndexableContainerBase> { +private: + /// Type of *this + using my_type = DummyIndices; + + /// Type *this derives from + using base_type = utilities::IndexableContainerBase; + +public: + /// Type used to hold the string representation of the dummy indices + using value_type = StringType; + + /// Type of a mutable reference to a value_type + using reference = value_type&; + + /// Type of a read-only reference to a value_type object + using const_reference = const value_type&; + + /// Type of the string representation after splitting on commas + using split_string_type = std::vector; + + /// Type used for offsets + using size_type = typename split_string_type::size_type; + + /// Type used for returning ordered sets of size_type objects + using offset_vector = std::vector; + + /** @brief Creates an object with no dummy indices. + * + * Default constructed DummyIndices objects behave like they contain the + * dummy indices for a scalar. + * + * @throw None No throw guarantee. + */ + DummyIndices() = default; + + /** @brief Constructs a DummyIndices object by parsing a string. + * + * We assume that DummyIndices objects will be created directly from user + * input and that user input will be in a type implicitly convertible to + * `const_reference`. Under these assumptions, this ctor is the main user- + * facing ctor for the class. This ctor will first remove spaces in + * @p dummy_indices and then split the space-less string on commas. + * Finally, it will verify that the resulting vector of dummy indices has + * non-empty elements. + * + * @param[in] dummy_indices The string used to initialize *this. + * + * @throw std::runtime_error if @p dummy_indices contains one or more + * commas and if after splitting on the commas + * one or more of the resulting dummy indices is + * empty. 
+ */ + explicit DummyIndices(const_reference dummy_indices) : + DummyIndices( + utilities::strings::split_string(remove_spaces_(dummy_indices), ",")) {} + + /** @brief Determines the number of unique indices in *this. + * + * A dummy index can be repeated if it is going to be summed over. This + * method analyzes the indices in *this and determines how many of them + * are unique. + * + * @return The number of indices which appear only once in *this. + * + * @throw std::bad_alloc if the temporary container can not be allocated. + * Strong throw guarantee. + */ + size_type unique_index_size() const { + std::set temp(this->begin(), this->end()); + return temp.size(); + } + + /** @brief Does *this have repeated indices? + * + * This method is used to determine if *this contains any index that + * appears more than once. + * + * @return True if *this contains a repeated index and false otherwise. + * + * @throw std::bad_alloc if the internal call to unique_index_size() + * throws. Strong throw guarantee. + */ + bool has_repeated_indices() const { + return unique_index_size() != this->size(); + } + + /** @brief Computes the permutation needed to convert *this into @p other. + * + * Each DummyIndices object is viewed as an ordered set of objects. If + * two DummyIndices objects contain the same objects, but in a different + * order, we can convert either object into the other by permuting it. + * This method computes the permutation needed to change *this into + * @p other. More specifically the result of this method is a vector + * of length `size()` such that the `i`-th element is the offset of + * `(*this)[i]` in @p other, i.e., if `x` is the return then + * `other[x[i]] == (*this)[i]`. + * + * @param[in] other The order we want to permute *this to. + * + * @return A vector such that the i-th element is the offset of + * `(*this)[i]` in @p other. 
+ * + * @throw std::runtime_error if *this and @p other do not have the same + * size, or if either *this or @p other have + * repeated indices, or if an index in *this + * does not appear in @p other. Strong throw + * guarantee in each case. + * @throw std::bad_alloc if there is a problem allocating the return. + * Strong throw guarantee. + */ + offset_vector permutation(const DummyIndices& other) const { + if(this->size() != other.size()) + throw std::runtime_error("Must have same number of dummy indices."); + + if(has_repeated_indices() || other.has_repeated_indices()) + throw std::runtime_error("Must contain unique dummy indices."); + + offset_vector rv; + for(const auto& index : *this) { + auto indices = other.find(index); + if(indices.empty()) + throw std::runtime_error("Dummy index not found in other"); + rv.push_back(indices[0]); + } + return rv; + } + + /** @brief Finds the offset of @p index_to_find in *this. + * + * This method can be used to determine which modes the dummy index + * @p index_to_find maps to. If @p index_to_find does not appear in *this + * the result is empty. If @p index_to_find appears more than once the + * result will contain the offset for each appearance. + * + * @param[in] index_to_find The dummy index to determine the offset of. + * + * @return A container whose elements are the offsets of @p index_to_find + * in *this. + * + * @throw std::bad_alloc if there is a problem allocating the return. + * Strong throw guarantee. 
+ */ + offset_vector find(const_reference index_to_find) const { + offset_vector rv; + for(size_type i = 0; i < this->size(); ++i) + if(m_dummy_indices_[i] == index_to_find) rv.push_back(i); + return rv; + } + +protected: + /// Main ctor for setting the value, throws if any index is empty + explicit DummyIndices(split_string_type split_dummy_indices) : + m_dummy_indices_(std::move(split_dummy_indices)) { + for(const auto& x : m_dummy_indices_) + if(x.empty()) + throw std::runtime_error( + "Dummy index is not allowed to be empty"); + } + + /// Lets the base class get at these implementations + friend base_type; + + /// Implements mutable element retrieval by forwarding to m_dummy indices_ + reference at_(size_type i) { return m_dummy_indices_[i]; } + + /// Implements read-only element retrieval by forwarding to m_dummy indices_ + const_reference at_(size_type i) const { return m_dummy_indices_[i]; } + + /// Implements size by calling m_dummy indices_.size() + size_type size_() const noexcept { return m_dummy_indices_.size(); } + +private: + /// Helper method for stripping spaces from the string @p input. + static auto remove_spaces_(const_reference input) { + value_type rv; + for(const auto c : input) + if(c != ' ') rv.push_back(c); + return rv; + } + + /// The split dummy indices + split_string_type m_dummy_indices_; +}; + +} // namespace tensorwrapper::dsl \ No newline at end of file diff --git a/include/tensorwrapper/dsl/labeled.hpp b/include/tensorwrapper/dsl/labeled.hpp new file mode 100644 index 00000000..7a4d4f89 --- /dev/null +++ b/include/tensorwrapper/dsl/labeled.hpp @@ -0,0 +1,129 @@ +#pragma once +/* + * Copyright 2024 NWChemEx-Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once +#include +#include +#include +#include +namespace tensorwrapper::dsl { + +/** @brief Represents an object whose modes are assigned dummy indices. + */ +template +class Labeled : public utilities::dsl::BinaryOp, + ObjectType, LabelType> { +private: + /// Type of *this + using my_type = Labeled; + + /// Type *this inherits from + using op_type = utilities::dsl::BinaryOp; + + /// Is T cv-qualified? + template + static constexpr bool is_cv_v = !std::is_same_v, T>; + + /// Is ObjectType cv-qualified? + static constexpr bool has_cv_object_v = is_cv_v; + + /// Does *this have a cv-qualified object and T is mutable? + template + static constexpr bool is_cv_conversion_v = has_cv_object_v && !is_cv_v; + + /// Enables a function when it is being called to convert a const object. + template + using enable_if_cv_conversion_t = std::enable_if_t>; + +public: + /// Type of the object (useful for TMP) + using object_type = std::decay_t; + + /// Type of the labels (useful for TMP) + using label_type = LabelType; + + /** @brief Creates a Labeled object that does not alias an object or labels. + * + * This ctor is needed because the base classes assume it is present. + * Users shouldn't actually need it. + * + * @throw None No throw guarantee. + */ + Labeled() = default; + + /** @brief Ensures labels are stored correctly. + * + * @tparam ObjectType2 The type of @p object. Must be implicitly + * convertible to @p ObjectType. + * @tparam LabelType2 The type of @p labels. Must be implicitly + * convertible to @p LabelType. 
+ * + * It is common for the labels to actually be a string literal, e.g., + * code like `"i,j"`. Type detection for such a type will not match it + * to LabelType. We solve this by using this ctor to explicitly convert + * @p labels into LabelType before the base class does its TMP. + * + * @param[in] object The object the labels apply to. + * @param[in] labels The annotations for the tensor. + * + * @throw std::bad_alloc if converting @p labels to LabelType throws. + * Strong throw guarantee. + */ + template + Labeled(ObjectType2&& object, LabelType2&& labels) : + op_type(std::forward(object), + LabelType(std::forward(labels))) {} + + /** @brief Allows implicit conversion from mutable objects to const objects + * + * @p ObjectType may have cv-qualifiers. This ctor allows Labeled instances + * aliasing mutable objects to be used when Labeled instances aliasing + * read-only objects are needed. + * + * @tparam ObjectType2 The object type stored in @p input. Must be + * equivalent to `const ObjectType`. + * @tparam Used to disable this overload via SFINAE if + * ObjectType2 != `const ObjectType` or if + */ + template> + Labeled(const Labeled& input) : + Labeled(input.lhs(), input.rhs()) {} + + /** @brief Assigns a DSL term to *this. + * + * @tparam TermType The type of the expression being assigned to *this. + * + * Under most circumstances execution of the DSL happens when an + * expression is assigned to Labeled object. The assignment happens via + * this method. + * + * @param[in] other The expression to assign to *this. + * + * @return *this after assigning @p other to *this. + */ + template + my_type& operator=(TermType&& other) { + // TODO: other should be rolled into a tensor graph object that can be + // manipulated at runtime. 
Parser is then moved to the backend + PairwiseParser p; + *this = p.dispatch(std::move(*this), std::forward(other)); + return *this; + } +}; + +} // namespace tensorwrapper::dsl \ No newline at end of file diff --git a/include/tensorwrapper/dsl/pairwise_parser.hpp b/include/tensorwrapper/dsl/pairwise_parser.hpp new file mode 100644 index 00000000..c8834ebe --- /dev/null +++ b/include/tensorwrapper/dsl/pairwise_parser.hpp @@ -0,0 +1,86 @@ +/* + * Copyright 2024 NWChemEx-Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once +#include +#include + +namespace tensorwrapper { +class Tensor; +namespace dsl { + +/** @brief Object which evaluates the AST of an expression pairwise. + * + * @tparam ObjectType The type of the objects associated with the dummy + * indices. Expected to be possibly cv-qualified versions + * of Tensor, buffers, shapes, etc. + * @tparam LabelType The type of object used for the dummy indices. + * + * The easiest way to evaluate an abstract syntax tree which contains + * operations involving at most two objects is by splitting it into subtrees + * which contain at most two connected nodes, i.e., considering each operation + * pairwise. That's what this parser does. + */ +template +class PairwiseParser { +public: + /// Type of a leaf in the AST + using labeled_type = Labeled; + + /** @brief Recursion end-point + * + * Evaluates @p rhs given that it will be evaluated into lhs. 
+ * This is the natural end-point for recursion down a branch of the AST. + * + * N.b., this overload is only responsible for evaluating @p rhs NOT for + * assigning it to @p lhs. + * + * @param[in] lhs The object that @p rhs will ultimately be assigned to. + * @param[in] rhs The "expression" that needs to be evaluated. + * + * @return @p rhs untouched. + * + * @throw None No throw guarantee. + */ + auto dispatch(labeled_type lhs, labeled_type rhs) { return rhs; } + + /** @brief Handles adding two expressions together. + * + * @tparam T The type of the expression on the left side of the "+" sign. + * @tparam U The type of the expression on the right side of the "+" sign. + * + * @param[in] lhs The object that @p rhs will ultimately be assigned to. + * @param[in] rhs The expression to evaluate. + * + * + */ + template + auto dispatch(labeled_type lhs, const utilities::dsl::Add& rhs) { + // TODO: This shouldn't be assigning to lhs, but letting the layer up + // do that + auto lA = dispatch(lhs, rhs.lhs()); + auto lB = dispatch(lhs, rhs.rhs()); + return add(std::move(lhs), std::move(lA), std::move(lB)); + } + +protected: + labeled_type add(labeled_type result, labeled_type lhs, labeled_type rhs); +}; + +extern template class PairwiseParser; + +} // namespace dsl +} // namespace tensorwrapper \ No newline at end of file diff --git a/include/tensorwrapper/tensor/detail_/tensor_input.hpp b/include/tensorwrapper/tensor/detail_/tensor_input.hpp index 92d3a317..b874e311 100644 --- a/include/tensorwrapper/tensor/detail_/tensor_input.hpp +++ b/include/tensorwrapper/tensor/detail_/tensor_input.hpp @@ -102,12 +102,18 @@ struct TensorInput { /// Type all buffer object's inherit from using buffer_base = typename allocator_base::buffer_base_type; + /// Type of a mutable reference to a buffer_base object + using buffer_reference = typename buffer_base::base_reference; + /// Type of a read-only reference to an object of type buffer_base using const_buffer_reference = typename 
buffer_base::const_base_reference; /// Type of a pointer to an object of type buffer_base using buffer_pointer = typename buffer_base::base_pointer; + /// Type of a pointer to a read-only buffer_base object + using const_buffer_pointer = typename buffer_base::const_base_pointer; + /// Type of a view of the runtime using runtime_view_type = typename allocator_base::runtime_view_type; diff --git a/include/tensorwrapper/tensor/tensor_class.hpp b/include/tensorwrapper/tensor/tensor_class.hpp index 65cb1683..1ad7eaed 100644 --- a/include/tensorwrapper/tensor/tensor_class.hpp +++ b/include/tensorwrapper/tensor/tensor_class.hpp @@ -15,6 +15,7 @@ */ #pragma once +#include #include namespace tensorwrapper { @@ -32,8 +33,19 @@ class Tensor { /// Type of a helper class which collects the inputs needed to make a tensor using input_type = detail_::TensorInput; + /// Type for determining if @p T is the type of a tensor? template - using disable_if_tensor_t = std::enable_if_t, T>; + using is_tensor_t = std::is_same; + + /// Are any of the types in @p Args equal to Tensor? 
+ template + static constexpr bool are_any_tensors_v = + std::disjunction_v...>; + + /// Enables a function so long as no type in @p Args is Tensor + template + using enable_if_no_tensors_t = + std::enable_if_t>; public: /// Type of the object implementing *this @@ -51,12 +63,18 @@ class Tensor { /// Type of a pointer to the tensor's logical layout using logical_layout_pointer = input_type::logical_layout_pointer; + /// Type of a mutable reference to the tensor's buffer + using buffer_reference = input_type::buffer_reference; + /// Type of a read-only reference to the tensor's buffer using const_buffer_reference = input_type::const_buffer_reference; /// Type of a pointer to the tensor's buffer using buffer_pointer = input_type::buffer_pointer; + /// Type of a pointer to a read-only buffer + using const_buffer_pointer = input_type::const_buffer_pointer; + /// Type of an initializer list if *this is a scalar using scalar_il_type = double; @@ -72,6 +90,20 @@ class Tensor { /// Type of an initializer list if *this is a rank 4 tensor using tensor4_il_type = std::initializer_list; + /// Type of a label + using label_type = std::string; + + /// Type of a read-only reference to an object of type label_type + using const_label_reference = const label_type&; + + /// Type of a labeled tensor + using labeled_tensor_type = dsl::Labeled; + + /// Type of a read-only labeled tensor + using const_labeled_tensor_type = dsl::Labeled; + + // Tensor() : Tensor(input_type{}) {} + /** @brief Initializes *this by processing the input provided in @p input. * * This ctor is only public to facilitate unit testing of the library. @@ -92,6 +124,9 @@ class Tensor { /** @brief Variadic value ctor. * * @tparam Args The types of the arguments. + * @tparam Template parameter to disable this overload via + * SFINAE if any of the provided arguments are Tensor + * objects. 
* * @note The intent is to create a tutorial showcasing how to initialize * the Tensor object under different conditions and NOT for the user @@ -141,9 +176,9 @@ class Tensor { * @throw std::bad_alloc if there is a problem allocating the state for * *this. Strong throw guarantee. */ - template - Tensor(disable_if_tensor_t&&... args) : - Tensor(input_type(std::forward(args)...)) {} + template...>> + Tensor(Args&&... args) : Tensor(input_type{std::forward(args)...}) {} /** @brief Creates a tensor from a (possibly) nested initializer list. * @@ -246,13 +281,25 @@ class Tensor { */ const_logical_reference logical_layout() const; - /** @brief Read-only access to the tensor's buffer. + /** @brief Mutable access to the tensor's buffer. * * The buffer of a tensor contains the actual elements. Generally speaking, * users should not have to interact with the buffer. The primary * exception to this is if the user wants to interface TensorWrapper with * another tensor solution. * + * @return A mutable reference to the buffer of the tensor. + * + * @throw std::runtime_error if *this is an empty tensor. Strong throw + * guarantee. + */ + buffer_reference buffer(); + + /** @brief Read-only access to the tensor's buffer. + * + * This method is the same as the non-const version except that resulting + * reference is read-only. + * * @return A read-only reference to the buffer of the tensor. * * @throw std::runtime_error if *this is an empty tensor. Strong throw @@ -260,6 +307,42 @@ class Tensor { */ const_buffer_reference buffer() const; + /** @brief Associates @p labels with the modes of *this. + * + * Expressing tensor operations is easier with the use of the Einstein + * summation convention. Usage of this convention requires the user to be + * able to associate dummy indices with the modes of the tensor. This + * function pairs @p labels with the modes of *this such that the i-th + * dummy index of @p labels is paired with the i-th mode of *this. 
+ * + * See dsl::DummyIndices for how the string is interpreted. + * + * Note that if *this is a rank 0 tensor @p labels should be the empty + * string. + * + * @param[in] labels The dummy indices to associate with each mode. + * + * @return A DSL term pairing *this with @p labels. + * + */ + labeled_tensor_type operator()(const_label_reference labels) { + return labeled_tensor_type(*this, labels); + } + + /** @brief Associates @p labels with the modes of *this. + * + * This method is the same as the non-const version except that the + * resulting DSL term contains a reference to an immutable tensor. + * + * @param[in] labels The dummy indices to associate with each mode. + * + * @return A DSL term pairing *this with @p labels. + * + */ + const_labeled_tensor_type operator()(const_label_reference labels) const { + return const_labeled_tensor_type(*this, labels); + } + // ------------------------------------------------------------------------- // -- Utility methods // ------------------------------------------------------------------------- diff --git a/include/tensorwrapper/tensorwrapper.hpp b/include/tensorwrapper/tensorwrapper.hpp index 51a0376c..864aa32c 100644 --- a/include/tensorwrapper/tensorwrapper.hpp +++ b/include/tensorwrapper/tensorwrapper.hpp @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include diff --git a/src/tensorwrapper/allocator/eigen.cpp b/src/tensorwrapper/allocator/eigen.cpp index 2befd3e8..67503db3 100644 --- a/src/tensorwrapper/allocator/eigen.cpp +++ b/src/tensorwrapper/allocator/eigen.cpp @@ -15,6 +15,7 @@ */ #include +#include #include #include @@ -45,6 +46,27 @@ typename EIGEN::eigen_buffer_pointer EIGEN::allocate( *playout); } +TPARAMS +bool EIGEN::can_rebind(const_buffer_base_reference buffer) { + auto pbuffer = dynamic_cast(&buffer); + return pbuffer != nullptr; +} + +TPARAMS +typename EIGEN::eigen_buffer_reference EIGEN::rebind( + buffer_base_reference buffer) { + if(can_rebind(buffer)) return 
static_cast(buffer); + throw std::runtime_error("Can not rebind buffer"); +} + +TPARAMS +typename EIGEN::const_eigen_buffer_reference EIGEN::rebind( + const_buffer_base_reference buffer) { + if(can_rebind(buffer)) + return static_cast(buffer); + throw std::runtime_error("Can not rebind buffer"); +} + +#define ALLOCATE_CONDITION(RANK) \ if(rank == RANK) return std::make_unique>(rv) diff --git a/src/tensorwrapper/buffer/buffer_base.cpp b/src/tensorwrapper/buffer/buffer_base.cpp new file mode 100644 index 00000000..e40c30cc --- /dev/null +++ b/src/tensorwrapper/buffer/buffer_base.cpp @@ -0,0 +1,31 @@ +/* + * Copyright 2024 NWChemEx-Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include + +namespace tensorwrapper::buffer { + +typename BufferBase::labeled_buffer_type BufferBase::operator()( + label_type labels) { + return labeled_buffer_type(*this, std::move(labels)); +} + +typename BufferBase::labeled_const_buffer_type BufferBase::operator()( + label_type labels) const { + return labeled_const_buffer_type(*this, std::move(labels)); +} + +} // namespace tensorwrapper::buffer \ No newline at end of file diff --git a/src/tensorwrapper/buffer/eigen.cpp b/src/tensorwrapper/buffer/eigen.cpp new file mode 100644 index 00000000..78c3a2d8 --- /dev/null +++ b/src/tensorwrapper/buffer/eigen.cpp @@ -0,0 +1,104 @@ +/* + * Copyright 2024 NWChemEx-Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include +#include + +namespace tensorwrapper::buffer { + +using dummy_indices_type = dsl::DummyIndices; + +#define TPARAMS template +#define EIGEN Eigen + +TPARAMS +typename EIGEN::buffer_base_reference EIGEN::addition_assignment_( + label_type this_labels, const_labeled_buffer_reference rhs) { + // TODO layouts + if(layout() != rhs.lhs().layout()) + throw std::runtime_error("Layouts must be the same (for now)"); + + dummy_indices_type llabels(this_labels); + dummy_indices_type rlabels(rhs.rhs()); + + using allocator_type = allocator::Eigen; + const auto& rhs_downcasted = allocator_type::rebind(rhs.lhs()); + + if(llabels != rlabels) { + auto r_to_l = rlabels.permutation(llabels); + std::vector r_to_l2(r_to_l.begin(), r_to_l.end()); + m_tensor_ += rhs_downcasted.value().shuffle(r_to_l2); + } else { + m_tensor_ += rhs_downcasted.value(); + } + + return *this; +} + +TPARAMS +typename EIGEN::buffer_base_reference EIGEN::permute_assignment_( + label_type this_labels, const_labeled_buffer_reference rhs) { + dummy_indices_type llabels(this_labels); + dummy_indices_type rlabels(rhs.rhs()); + + using allocator_type = allocator::Eigen; + const auto& rhs_downcasted = allocator_type::rebind(rhs.lhs()); + + if(llabels != rlabels) { // We need to permute rhs before assignment + auto r_to_l = rlabels.permutation(llabels); + // Eigen wants int objects + std::vector r_to_l2(r_to_l.begin(), r_to_l.end()); + m_tensor_ = rhs_downcasted.value().shuffle(r_to_l2); + } else { + m_tensor_ = rhs_downcasted.value(); + } + + // TODO: permute layout + + return *this; +} + +TPARAMS +typename EIGEN::string_type EIGEN::to_string_() const { + std::stringstream ss; + ss << m_tensor_; + return ss.str(); +} + +#undef EIGEN +#undef TPARAMS + +#define DEFINE_EIGEN_BUFFER(RANK) \ + template class Eigen; \ + template class Eigen + +DEFINE_EIGEN_BUFFER(0); +DEFINE_EIGEN_BUFFER(1); +DEFINE_EIGEN_BUFFER(2); +DEFINE_EIGEN_BUFFER(3); +DEFINE_EIGEN_BUFFER(4); 
+DEFINE_EIGEN_BUFFER(5); +DEFINE_EIGEN_BUFFER(6); +DEFINE_EIGEN_BUFFER(7); +DEFINE_EIGEN_BUFFER(8); +DEFINE_EIGEN_BUFFER(9); +DEFINE_EIGEN_BUFFER(10); + +#undef DEFINE_EIGEN_BUFFER + +} // namespace tensorwrapper::buffer \ No newline at end of file diff --git a/src/tensorwrapper/dsl/pairwise_parser.cpp b/src/tensorwrapper/dsl/pairwise_parser.cpp new file mode 100644 index 00000000..8a2c9189 --- /dev/null +++ b/src/tensorwrapper/dsl/pairwise_parser.cpp @@ -0,0 +1,82 @@ +/* + * Copyright 2024 NWChemEx-Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include + +namespace tensorwrapper::dsl { +namespace { +struct CallAddition { + template + static decltype(auto) run(LHSType&& lhs, RHSType&& rhs) { + const auto& llabels = lhs.rhs(); + return lhs.lhs().addition(llabels, std::forward(rhs)); + } +}; + +template +decltype(auto) binary_op(ResultType&& result, LHSType&& lhs, RHSType&& rhs) { + auto& rv_object = result.lhs(); + const auto& lhs_object = lhs.lhs(); + const auto& rhs_object = rhs.lhs(); + + const auto& lhs_labels = lhs.rhs(); + const auto& rhs_labels = rhs.rhs(); + + using object_type = typename std::decay_t::object_type; + + if constexpr(std::is_same_v) { + if(rv_object == Tensor{}) { + const auto& llayout = lhs_object.logical_layout(); + // const auto& rlayout = rhs_object.logical_layout(); + std::decay_t rv_layout( + llayout); // FunctorType::run(llayout(lhs_labels), + // rlayout(rhs_labels)); + + auto lbuffer = lhs_object.buffer()(lhs_labels); + auto rbuffer = rhs_object.buffer()(rhs_labels); + auto buffer = FunctorType::run(lbuffer, rbuffer); + + // TODO figure out permutation + Tensor(std::move(rv_layout), std::move(buffer)).swap(rv_object); + } else { + throw std::runtime_error("Hints are not allowed yet!"); + } + } else { + // Getting here means the assert will fail + static_assert(std::is_same_v, "NYI"); + } + return result; +} +} // namespace + +#define TPARAMS template +#define PARSER PairwiseParser +#define LABELED_TYPE typename PARSER::labeled_type + +TPARAMS LABELED_TYPE PARSER::add(labeled_type result, labeled_type lhs, + labeled_type rhs) { + return binary_op(result, lhs, rhs); +} + +#undef PARSER +#undef TPARAMS + +template class PairwiseParser; + +} // namespace tensorwrapper::dsl \ No newline at end of file diff --git a/src/tensorwrapper/tensor/detail_/tensor_factory.cpp b/src/tensorwrapper/tensor/detail_/tensor_factory.cpp index 7023316f..c415b13b 100644 --- a/src/tensorwrapper/tensor/detail_/tensor_factory.cpp +++ 
b/src/tensorwrapper/tensor/detail_/tensor_factory.cpp @@ -19,6 +19,7 @@ #include "tensor_pimpl.hpp" #include #include +#include #include namespace tensorwrapper::detail_ { diff --git a/src/tensorwrapper/tensor/tensor_class.cpp b/src/tensorwrapper/tensor/tensor_class.cpp index d40c94e3..559b37ad 100644 --- a/src/tensorwrapper/tensor/tensor_class.cpp +++ b/src/tensorwrapper/tensor/tensor_class.cpp @@ -21,6 +21,7 @@ namespace tensorwrapper { using const_logical_reference = typename Tensor::const_logical_reference; +using buffer_reference = typename Tensor::buffer_reference; using const_buffer_reference = typename Tensor::const_buffer_reference; // -- Ctors, assignment, and dtor @@ -64,6 +65,11 @@ const_logical_reference Tensor::logical_layout() const { return m_pimpl_->logical_layout(); } +buffer_reference Tensor::buffer() { + assert_pimpl_(); + return m_pimpl_->buffer(); +} + const_buffer_reference Tensor::buffer() const { assert_pimpl_(); return m_pimpl_->buffer(); diff --git a/tests/cxx/unit_tests/tensorwrapper/allocator/eigen.cpp b/tests/cxx/unit_tests/tensorwrapper/allocator/eigen.cpp index 8ac06fdf..ef83631c 100644 --- a/tests/cxx/unit_tests/tensorwrapper/allocator/eigen.cpp +++ b/tests/cxx/unit_tests/tensorwrapper/allocator/eigen.cpp @@ -17,6 +17,7 @@ #include "../helpers.hpp" #include #include +#include #include using namespace tensorwrapper; @@ -124,6 +125,29 @@ TEMPLATE_TEST_CASE("EigenAllocator", "", float, double) { REQUIRE_THROWS_AS(scalar_alloc.allocate(vector_layout), except_t); } + SECTION("can_rebind") { + REQUIRE(scalar_alloc.can_rebind(scalar_corr)); + REQUIRE_FALSE(scalar_alloc.can_rebind(vector_corr)); + } + + SECTION("rebind(non-const)") { + using type = typename scalar_alloc_type::buffer_base_reference; + type scalar_base = scalar_corr; + auto& eigen_buffer = scalar_alloc.rebind(scalar_base); + REQUIRE(&eigen_buffer == &scalar_corr); + REQUIRE_THROWS_AS(scalar_alloc.rebind(vector_corr), std::runtime_error); + } + + SECTION("rebind(const)") { + 
using type = typename scalar_alloc_type::const_buffer_base_reference; + type scalar_base = scalar_corr; + auto& eigen_buffer = scalar_alloc.rebind(scalar_base); + REQUIRE(&eigen_buffer == &scalar_corr); + + type vector_base = vector_corr; + REQUIRE_THROWS_AS(scalar_alloc.rebind(vector_base), std::runtime_error); + } + SECTION("operator==") { REQUIRE(scalar_alloc == scalar_alloc_type(rv)); REQUIRE_FALSE(scalar_alloc == vector_alloc); diff --git a/tests/cxx/unit_tests/tensorwrapper/buffer/buffer_base.cpp b/tests/cxx/unit_tests/tensorwrapper/buffer/buffer_base.cpp index f17e1aa6..b82bbcee 100644 --- a/tests/cxx/unit_tests/tensorwrapper/buffer/buffer_base.cpp +++ b/tests/cxx/unit_tests/tensorwrapper/buffer/buffer_base.cpp @@ -27,6 +27,9 @@ using namespace buffer; * - BufferBase is an abstract class. To test it we must create an instance of * a derived class. We then will upcast to BufferBase and perform checks * through the BufferBase interface. + * - `xxx_assignment` methods are tested in the derived classes; however, the + * corresponding `xxx` method is defined in BufferBase and thus is tested + * here (`xxx` being `addition`, `subtraction`, etc.). 
* */ @@ -67,6 +70,38 @@ TEST_CASE("BufferBase") { REQUIRE(vector_base.layout().are_equal(vector_layout)); } + SECTION("addition") { + scalar_buffer scalar2(eigen_scalar, scalar_layout); + scalar2.value()() = 42.0; + + auto s = scalar(""); + auto pscalar2 = scalar2.addition("", s); + + scalar_buffer scalar_corr(eigen_scalar, scalar_layout); + scalar_corr.value()() = 43.0; + REQUIRE(*pscalar2 == scalar_corr); + } + + SECTION("operator()(std::string)") { + auto labeled_scalar = scalar_base(""); + REQUIRE(labeled_scalar.lhs().are_equal(scalar_base)); + REQUIRE(labeled_scalar.rhs() == ""); + + auto labeled_vector = vector_base("i"); + REQUIRE(labeled_vector.lhs().are_equal(vector_base)); + REQUIRE(labeled_vector.rhs() == "i"); + } + + SECTION("operator()(std::string) const") { + auto labeled_scalar = std::as_const(scalar_base)(""); + REQUIRE(labeled_scalar.lhs().are_equal(scalar_base)); + REQUIRE(labeled_scalar.rhs() == ""); + + auto labeled_vector = std::as_const(vector_base)("i"); + REQUIRE(labeled_vector.lhs().are_equal(vector_base)); + REQUIRE(labeled_vector.rhs() == "i"); + } + SECTION("operator==") { // Defaulted layout == defaulted layout REQUIRE(defaulted_base == scalar_buffer()); diff --git a/tests/cxx/unit_tests/tensorwrapper/buffer/eigen.cpp b/tests/cxx/unit_tests/tensorwrapper/buffer/eigen.cpp index 73572c8e..9d23cad9 100644 --- a/tests/cxx/unit_tests/tensorwrapper/buffer/eigen.cpp +++ b/tests/cxx/unit_tests/tensorwrapper/buffer/eigen.cpp @@ -159,6 +159,98 @@ TEMPLATE_TEST_CASE("Eigen", "", float, double) { REQUIRE(pscalar.are_equal(scalar2)); REQUIRE_FALSE(pmatrix.are_equal(scalar2)); } + + SECTION("addition_assignment") { + SECTION("scalar") { + scalar_buffer scalar2(eigen_scalar, scalar_layout); + scalar2.value()() = 42.0; + + auto s = scalar(""); + auto pscalar2 = &(scalar2.addition_assignment("", s)); + + scalar_buffer scalar_corr(eigen_scalar, scalar_layout); + scalar_corr.value()() = 43.0; + REQUIRE(pscalar2 == &scalar2); + REQUIRE(scalar2 == 
scalar_corr); + } + + SECTION("vector") { + vector_buffer vector2(eigen_vector, vector_layout); + + auto vi = vector("i"); + auto pvector2 = &(vector2.addition_assignment("i", vi)); + + vector_buffer vector_corr(eigen_vector, vector_layout); + vector_corr.value()(0) = 2.0; + vector_corr.value()(1) = 4.0; + + REQUIRE(pvector2 == &vector2); + REQUIRE(vector2 == vector_corr); + } + + SECTION("matrix") { + matrix_buffer matrix2(eigen_matrix, matrix_layout); + + auto mij = matrix("i,j"); + auto pmatrix2 = &(matrix2.addition_assignment("i,j", mij)); + + matrix_buffer matrix_corr(eigen_matrix, matrix_layout); + + matrix_corr.value()(0, 0) = 2.0; + matrix_corr.value()(0, 1) = 4.0; + matrix_corr.value()(0, 2) = 6.0; + matrix_corr.value()(1, 0) = 8.0; + matrix_corr.value()(1, 1) = 10.0; + matrix_corr.value()(1, 2) = 12.0; + + REQUIRE(pmatrix2 == &matrix2); + REQUIRE(matrix2 == matrix_corr); + + // SECTION("permutation") { + // layout::Physical l(shape::Smooth{3, 2}, g, p); + // std::array p10{1, 0}; + // auto eigen_matrix_t = eigen_matrix.shuffle(p10); + // matrix_buffer matrix3(eigen_matrix_t, l); + + // auto pmatrix3 = + // &(matrix3.addition_assignment("j,i", mij)); + + // matrix_buffer corr(eigen_matrix_t, l); + // corr.value()(0, 0) = 3.0; + // corr.value()(0, 1) = 6.0; + // corr.value()(1, 0) = 9.0; + // corr.value()(1, 1) = 12.0; + // corr.value()(2, 0) = 15.0; + // corr.value()(2, 1) = 18.0; + + // REQUIRE(pmatrix3 == &matrix3); + // REQUIRE(matrix3 == corr); + // } + } + + // Can't cast + REQUIRE_THROWS_AS(vector.addition_assignment("", scalar("")), + std::runtime_error); + + // Labels must match + REQUIRE_THROWS_AS(vector.addition_assignment("j", vector("i")), + std::runtime_error); + } + + SECTION("permute_assignment") { + // layout::Physical l(shape::Smooth{3, 2}, g, p); + // std::array p10{1, 0}; + // auto eigen_matrix_t = eigen_matrix.shuffle(p10); + // matrix_buffer corr(eigen_matrix_t, l); + + // matrix_buffer matrix2; + + // auto& mij = matrix("i,j"); + // 
auto pmatrix2 = &(matrix2.permute_assignment("j,i", mij)); + + // REQUIRE(pmatrix2 == &matrix2); + // REQUIRE(matrix2 == corr); + } } } } diff --git a/tests/cxx/unit_tests/tensorwrapper/dsl/dummy_indices.cpp b/tests/cxx/unit_tests/tensorwrapper/dsl/dummy_indices.cpp new file mode 100644 index 00000000..e279ebf0 --- /dev/null +++ b/tests/cxx/unit_tests/tensorwrapper/dsl/dummy_indices.cpp @@ -0,0 +1,161 @@ +/* + * Copyright 2024 NWChemEx-Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "../testing/testing.hpp" +#include + +using namespace tensorwrapper; + +TEST_CASE("DummyIndices") { + using dummy_indices_type = dsl::DummyIndices; + + dummy_indices_type defaulted; + dummy_indices_type scalar(""); + dummy_indices_type vector("i"); + dummy_indices_type matrix("i, j"); + dummy_indices_type tensor("i, jk, l"); + + SECTION("CTors") { + SECTION("defaulted") { REQUIRE(defaulted.size() == 0); } + + SECTION("string value") { + REQUIRE(scalar.size() == 0); + + REQUIRE(vector.size() == 1); + REQUIRE(vector[0] == "i"); + + REQUIRE(matrix.size() == 2); + REQUIRE(matrix[0] == "i"); + REQUIRE(matrix[1] == "j"); + + REQUIRE(tensor.size() == 3); + REQUIRE(tensor[0] == "i"); + REQUIRE(tensor[1] == "jk"); + REQUIRE(tensor[2] == "l"); + + // Dummy indices can't be empty + REQUIRE_THROWS_AS(dummy_indices_type("i, "), std::runtime_error); + } + + testing::test_copy_move_ctor_and_assignment(defaulted, scalar, vector, + matrix, tensor); + } + + SECTION("unique_index_size") { + REQUIRE(defaulted.unique_index_size() == 0); + REQUIRE(scalar.unique_index_size() == 0); + REQUIRE(vector.unique_index_size() == 1); + REQUIRE(matrix.unique_index_size() == 2); + REQUIRE(tensor.unique_index_size() == 3); + REQUIRE(dummy_indices_type("i,i").unique_index_size() == 1); + } + + SECTION("has_repeated_indices") { + REQUIRE_FALSE(defaulted.has_repeated_indices()); + REQUIRE_FALSE(scalar.has_repeated_indices()); + REQUIRE_FALSE(vector.has_repeated_indices()); + REQUIRE_FALSE(matrix.has_repeated_indices()); + REQUIRE_FALSE(tensor.has_repeated_indices()); + REQUIRE(dummy_indices_type("i,i").has_repeated_indices()); + } + + SECTION("permutation") { + using offset_vector = typename dummy_indices_type::offset_vector; + + REQUIRE(scalar.permutation(scalar) == offset_vector{}); + + REQUIRE(vector.permutation(vector) == offset_vector{0}); + + dummy_indices_type matrix2("j,i"); + REQUIRE(matrix.permutation(matrix) == offset_vector{0, 1}); + REQUIRE(matrix.permutation(matrix2) == 
offset_vector{1, 0}); + REQUIRE(matrix2.permutation(matrix) == offset_vector{1, 0}); + + dummy_indices_type tensor2("jk, i, l"); + dummy_indices_type tensor3("l, jk, i"); + dummy_indices_type tensor4("i,l,jk"); + dummy_indices_type tensor5("l,i,jk"); + dummy_indices_type tensor6("jk, l, i"); + REQUIRE(tensor.permutation(tensor) == offset_vector{0, 1, 2}); + REQUIRE(tensor.permutation(tensor2) == offset_vector{1, 0, 2}); + REQUIRE(tensor.permutation(tensor3) == offset_vector{2, 1, 0}); + REQUIRE(tensor.permutation(tensor4) == offset_vector{0, 2, 1}); + REQUIRE(tensor.permutation(tensor5) == offset_vector{1, 2, 0}); + REQUIRE(tensor.permutation(tensor6) == offset_vector{2, 0, 1}); + + dummy_indices_type repeated("i,i"); + + // Must have same number of indices + REQUIRE_THROWS_AS(scalar.permutation(vector), std::runtime_error); + + // *this can't have repeated indices + REQUIRE_THROWS_AS(repeated.permutation(matrix), std::runtime_error); + + // other can't have repeated indices + REQUIRE_THROWS_AS(matrix.permutation(repeated), std::runtime_error); + + // error if index isn't in both + dummy_indices_type other("j"); + + REQUIRE_THROWS_AS(vector.permutation(other), std::runtime_error); + } + + SECTION("find(const_reference)") { + using offset_vector = typename dummy_indices_type::offset_vector; + REQUIRE(defaulted.find("") == offset_vector{}); + + REQUIRE(scalar.find("") == offset_vector{}); + + REQUIRE(vector.find("i") == offset_vector{0}); + REQUIRE(vector.find("j") == offset_vector{}); + + REQUIRE(matrix.find("i") == offset_vector{0}); + REQUIRE(matrix.find("j") == offset_vector{1}); + + REQUIRE(tensor.find("i") == offset_vector{0}); + REQUIRE(tensor.find("jk") == offset_vector{1}); + REQUIRE(tensor.find("l") == offset_vector{2}); + + REQUIRE(dummy_indices_type("i,i").find("i") == offset_vector{0, 1}); + } + + SECTION("comparison") { + // Default construction is indistinguishable from scalar indices + REQUIRE(defaulted == scalar); + + // Different ranks are different 
+ REQUIRE_FALSE(defaulted == vector); + + // Same vector indices + REQUIRE(vector == dummy_indices_type("i")); + + // Different vector indices + REQUIRE_FALSE(vector == dummy_indices_type("j")); + + // Same matrix indices + REQUIRE(matrix == dummy_indices_type("i,j")); + + // Spaces aren't significant + REQUIRE(matrix == dummy_indices_type("i, j")); + REQUIRE(matrix == dummy_indices_type(" i , j ")); + + // Are case sensitive + REQUIRE_FALSE(matrix == dummy_indices_type("I,j")); + + // Permutations are different + REQUIRE_FALSE(matrix == dummy_indices_type("j,i")); + } +} diff --git a/tests/cxx/unit_tests/tensorwrapper/dsl/labeled.cpp b/tests/cxx/unit_tests/tensorwrapper/dsl/labeled.cpp new file mode 100644 index 00000000..95788bf3 --- /dev/null +++ b/tests/cxx/unit_tests/tensorwrapper/dsl/labeled.cpp @@ -0,0 +1,77 @@ +/* + * Copyright 2024 NWChemEx-Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "../testing/testing.hpp" +#include + +using namespace tensorwrapper; + +using test_types = std::tuple; + +TEMPLATE_LIST_TEST_CASE("Labeled", "", test_types) { + using object_type = TestType; + using labeled_type = dsl::Labeled; + using labels_type = typename labeled_type::label_type; + + labels_type ij("i,j"); + object_type defaulted{}; + labeled_type labeled_default(defaulted, ij); + + SECTION("Ctor") { + SECTION("Value") { + REQUIRE(labeled_default.lhs() == defaulted); + REQUIRE(labeled_default.rhs() == ij); + } + + SECTION("to const") { + using const_labeled_type = dsl::Labeled; + const_labeled_type const_labeled_default(labeled_default); + + REQUIRE(const_labeled_default.lhs() == defaulted); + REQUIRE(const_labeled_default.rhs() == ij); + } + } + + SECTION("operator=") { + // At present this operator just calls Parser dispatch. We know that + // works from other tests so here we just spot check. + Tensor t; + + SECTION("scalar") { + Tensor scalar(testing::smooth_scalar()); + auto labeled_t = t(""); + auto plabeled_t = &(labeled_t = scalar("") + scalar("")); + REQUIRE(plabeled_t == &labeled_t); + + auto buffer = testing::eigen_scalar(); + buffer.value()() = 84.0; + Tensor corr(scalar.logical_layout(), std::move(buffer)); + REQUIRE(t == corr); + } + + SECTION("Vector") { + Tensor vector(testing::smooth_vector()); + auto labeled_t = t("i"); + auto plabeled_t = &(labeled_t = vector("i") + vector("i")); + REQUIRE(plabeled_t == &labeled_t); + + auto buffer = testing::eigen_vector(); + for(std::size_t i = 0; i < 5; ++i) buffer.value()(i) = i + i; + Tensor corr(t.logical_layout(), std::move(buffer)); + REQUIRE(t == corr); + } + } +} \ No newline at end of file diff --git a/tests/cxx/unit_tests/tensorwrapper/dsl/pairwise_parser.cpp b/tests/cxx/unit_tests/tensorwrapper/dsl/pairwise_parser.cpp new file mode 100644 index 00000000..5c8ca0d5 --- /dev/null +++ b/tests/cxx/unit_tests/tensorwrapper/dsl/pairwise_parser.cpp @@ -0,0 +1,53 @@ +/* + * Copyright 2024 
NWChemEx-Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "../testing/testing.hpp" +#include + +using namespace tensorwrapper; + +TEST_CASE("PairwiseParser") { + Tensor scalar(testing::smooth_scalar()); + Tensor vector(testing::smooth_vector()); + + dsl::PairwiseParser p; + + SECTION("add") { + Tensor t; + + SECTION("scalar") { + auto rv = p.dispatch(t(""), scalar("") + scalar("")); + REQUIRE(&rv.lhs() == &t); + REQUIRE(rv.rhs() == ""); + + auto buffer = testing::eigen_scalar(); + buffer.value()() = 84.0; + Tensor corr(scalar.logical_layout(), std::move(buffer)); + REQUIRE(t == corr); + } + + SECTION("Vector") { + auto rv = p.dispatch(t("i"), vector("i") + vector("i")); + REQUIRE(&rv.lhs() == &t); + REQUIRE(rv.rhs() == "i"); + + auto buffer = testing::eigen_vector(); + for(std::size_t i = 0; i < 5; ++i) buffer.value()(i) = i + i; + Tensor corr(t.logical_layout(), std::move(buffer)); + REQUIRE(t == corr); + } + } +} \ No newline at end of file diff --git a/tests/cxx/unit_tests/tensorwrapper/inputs.hpp b/tests/cxx/unit_tests/tensorwrapper/inputs.hpp index 653a3fc1..b564969f 100644 --- a/tests/cxx/unit_tests/tensorwrapper/inputs.hpp +++ b/tests/cxx/unit_tests/tensorwrapper/inputs.hpp @@ -16,6 +16,7 @@ #pragma once #include +#include // This file contains some functions for creating TensorInput objects that span // a number of use cases. 
This is meant to make it easier to test TensorWrapper @@ -25,27 +26,25 @@ namespace tensorwrapper::testing { inline auto default_input() { return detail_::TensorInput{}; } -inline auto smooth_scalar() { - using buffer_type = buffer::Eigen; - using data_type = typename buffer_type::data_type; +template +inline auto smooth_scalar_() { + auto buffer = eigen_scalar(); shape::Smooth shape{}; - layout::Physical l(shape); - data_type scalar; - scalar() = 42.0; - return detail_::TensorInput(shape, buffer_type(scalar, l)); + return detail_::TensorInput(shape, std::move(buffer)); } +inline auto smooth_scalar() { return smooth_scalar_(); } + /// 5 element vector such that element i is i -inline auto smooth_vector() { - using buffer_type = buffer::Eigen; - using data_type = typename buffer_type::data_type; +template +inline auto smooth_vector_() { + auto buffer = eigen_vector(); shape::Smooth shape{5}; - layout::Physical l(shape); - data_type vector(5); - for(std::size_t i = 0; i < 5; ++i) vector(i) = i; - return detail_::TensorInput(shape, buffer_type(vector, l)); + return detail_::TensorInput(shape, std::move(buffer)); } +inline auto smooth_vector() { return smooth_vector_(); } + /// 5 element vector internally stored as a 5 by 1 matrix inline auto smooth_vector_alt() { using buffer_type = buffer::Eigen; @@ -57,19 +56,15 @@ inline auto smooth_vector_alt() { return detail_::TensorInput(shape, buffer_type(matrix, l)); } -inline auto smooth_matrix() { - using buffer_type = buffer::Eigen; - using data_type = typename buffer_type::data_type; +template +inline auto smooth_matrix_() { + auto buffer = eigen_matrix(); shape::Smooth shape{2, 2}; - layout::Physical l(shape); - data_type matrix(2, 2); - matrix(0, 0) = 1.0; - matrix(0, 1) = 2.0; - matrix(1, 0) = 3.0; - matrix(1, 1) = 4.0; - return detail_::TensorInput(shape, buffer_type(matrix, l)); + return detail_::TensorInput(shape, std::move(buffer)); } +inline auto smooth_matrix() { return smooth_matrix_(); } + inline auto 
smooth_symmetric_matrix() { using buffer_type = buffer::Eigen; using data_type = typename buffer_type::data_type; @@ -90,23 +85,15 @@ inline auto smooth_symmetric_matrix() { return detail_::TensorInput(shape, g, buffer_type(matrix, l)); } -inline auto smooth_tensor3() { - using buffer_type = buffer::Eigen; - using data_type = typename buffer_type::data_type; +template +inline auto smooth_tensor3_() { + auto buffer = eigen_tensor3(); shape::Smooth shape{2, 2, 2}; - layout::Physical l(shape); - data_type tensor(2, 2, 2); - tensor(0, 0, 0) = 1.0; - tensor(0, 0, 1) = 2.0; - tensor(0, 1, 0) = 3.0; - tensor(0, 1, 1) = 4.0; - tensor(1, 0, 0) = 5.0; - tensor(1, 0, 1) = 6.0; - tensor(1, 1, 0) = 7.0; - tensor(1, 1, 1) = 8.0; - return detail_::TensorInput(shape, buffer_type(tensor, l)); + return detail_::TensorInput(shape, std::move(buffer)); } +inline auto smooth_tensor3() { return smooth_tensor3_(); } + inline auto smooth_tensor4() { using buffer_type = buffer::Eigen; using data_type = typename buffer_type::data_type; diff --git a/tests/cxx/unit_tests/tensorwrapper/tensor/detail_/tensor_factory.cpp b/tests/cxx/unit_tests/tensorwrapper/tensor/detail_/tensor_factory.cpp index f36e41a1..d0156e7d 100644 --- a/tests/cxx/unit_tests/tensorwrapper/tensor/detail_/tensor_factory.cpp +++ b/tests/cxx/unit_tests/tensorwrapper/tensor/detail_/tensor_factory.cpp @@ -197,5 +197,12 @@ TEST_CASE("TensorFactory") { REQUIRE(i.has_physical_layout()); REQUIRE_THROWS_AS(f.assert_valid(i), e_t); } + + SECTION("logical layout and buffer (should work)") { + TensorInput i(std::move(logical), std::move(pbuffer)); + REQUIRE(i.has_logical_layout()); + REQUIRE(i.has_buffer()); + REQUIRE_NOTHROW(f.assert_valid(i)); + } } } diff --git a/tests/cxx/unit_tests/tensorwrapper/tensor/tensor_class.cpp b/tests/cxx/unit_tests/tensorwrapper/tensor/tensor_class.cpp index 3a2b1e0a..25a9fe5d 100644 --- a/tests/cxx/unit_tests/tensorwrapper/tensor/tensor_class.cpp +++ 
b/tests/cxx/unit_tests/tensorwrapper/tensor/tensor_class.cpp @@ -13,8 +13,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -#include "../helpers.hpp" -#include "../inputs.hpp" +#include "../testing/testing.hpp" #include #include #include @@ -99,6 +98,15 @@ TEST_CASE("Tensor") { const auto& const_defaulted = defaulted; REQUIRE_THROWS_AS(const_defaulted.logical_layout(), std::runtime_error); } + SECTION("buffer() const") { + auto& scalar_buffer = scalar.buffer(); + REQUIRE(scalar_buffer.are_equal(scalar_buffer_corr)); + + auto& vector_buffer = vector.buffer(); + REQUIRE(vector_buffer.are_equal(vector_buffer_corr)); + + REQUIRE_THROWS_AS(defaulted.buffer(), std::runtime_error); + } SECTION("buffer() const") { auto& scalar_buffer = std::as_const(scalar).buffer(); @@ -111,6 +119,24 @@ TEST_CASE("Tensor") { REQUIRE_THROWS_AS(const_defaulted.buffer(), std::runtime_error); } + SECTION("operator(std::string)") { + auto labeled_scalar = scalar(""); + auto labeled_vector = vector("i"); + + using labeled_tensor_type = Tensor::labeled_tensor_type; + REQUIRE(labeled_scalar == labeled_tensor_type(scalar, "")); + REQUIRE(labeled_vector == labeled_tensor_type(vector, "i")); + } + + SECTION("operator(std::string) const") { + auto labeled_scalar = std::as_const(scalar)(""); + auto labeled_vector = std::as_const(vector)("i"); + + using const_labeled_tensor_type = Tensor::const_labeled_tensor_type; + REQUIRE(labeled_scalar == const_labeled_tensor_type(scalar, "")); + REQUIRE(labeled_vector == const_labeled_tensor_type(vector, "i")); + } + SECTION("swap") { Tensor scalar_copy(scalar); Tensor vector_copy(vector); @@ -153,4 +179,27 @@ TEST_CASE("Tensor") { REQUIRE_FALSE(scalar != other_scalar); REQUIRE(scalar != vector); } + + SECTION("DSL") { + // These are just spot checks to make sure the DSL works on the user + // side + SECTION("Scalar") { + Tensor rv; + rv("") = scalar("") + scalar(""); + auto buffer = 
testing::eigen_scalar(); + buffer.value()() = 84.0; + Tensor corr(scalar.logical_layout(), std::move(buffer)); + REQUIRE(rv == corr); + } + + SECTION("Vector") { + Tensor rv; + rv("i") = vector("i") + vector("i"); + + auto buffer = testing::eigen_vector(); + for(std::size_t i = 0; i < 5; ++i) buffer.value()(i) = i + i; + Tensor corr(vector.logical_layout(), std::move(buffer)); + REQUIRE(rv == corr); + } + } } diff --git a/tests/cxx/unit_tests/tensorwrapper/testing/eigen_buffers.hpp b/tests/cxx/unit_tests/tensorwrapper/testing/eigen_buffers.hpp new file mode 100644 index 00000000..84643484 --- /dev/null +++ b/tests/cxx/unit_tests/tensorwrapper/testing/eigen_buffers.hpp @@ -0,0 +1,93 @@ +/* + * Copyright 2024 NWChemEx-Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once +#include + +/** @file eigen_buffers.hpp + * + * This file creates some hard-coded buffer::Eigen objects that can be used + * for testing. 
+ * + */ + +namespace tensorwrapper::testing { + +// Typedefs of buffer::Eigen objects with various template parameters +using ebufferf0 = buffer::Eigen; +using ebufferf1 = buffer::Eigen; +using ebufferf2 = buffer::Eigen; +using ebufferf3 = buffer::Eigen; +using ebufferd0 = buffer::Eigen; +using ebufferd1 = buffer::Eigen; +using ebufferd2 = buffer::Eigen; +using ebufferd3 = buffer::Eigen; + +template +auto eigen_scalar() { + using buffer_type = buffer::Eigen; + using data_type = typename buffer_type::data_type; + data_type scalar; + scalar() = 42.0; + shape::Smooth shape{}; + layout::Physical l(shape); + return buffer_type(scalar, l); +} + +template +auto eigen_vector() { + using buffer_type = buffer::Eigen; + using data_type = typename buffer_type::data_type; + data_type vector(5); + for(std::size_t i = 0; i < 5; ++i) vector(i) = i; + shape::Smooth shape{5}; + layout::Physical l(shape); + return buffer_type(vector, l); +} + +template +auto eigen_matrix() { + using buffer_type = buffer::Eigen; + using data_type = typename buffer_type::data_type; + data_type matrix(2, 2); + matrix(0, 0) = 1.0; + matrix(0, 1) = 2.0; + matrix(1, 0) = 3.0; + matrix(1, 1) = 4.0; + shape::Smooth shape{2, 2}; + layout::Physical l(shape); + return buffer_type(matrix, l); +} + +template +auto eigen_tensor3() { + using buffer_type = buffer::Eigen; + using data_type = typename buffer_type::data_type; + shape::Smooth shape{2, 2, 2}; + layout::Physical l(shape); + data_type tensor(2, 2, 2); + tensor(0, 0, 0) = 1.0; + tensor(0, 0, 1) = 2.0; + tensor(0, 1, 0) = 3.0; + tensor(0, 1, 1) = 4.0; + tensor(1, 0, 0) = 5.0; + tensor(1, 0, 1) = 6.0; + tensor(1, 1, 0) = 7.0; + tensor(1, 1, 1) = 8.0; + return buffer_type(tensor, l); +} + +} // namespace tensorwrapper::testing \ No newline at end of file diff --git a/tests/cxx/unit_tests/tensorwrapper/testing/testing.hpp b/tests/cxx/unit_tests/tensorwrapper/testing/testing.hpp new file mode 100644 index 00000000..f5b5e683 --- /dev/null +++ 
b/tests/cxx/unit_tests/tensorwrapper/testing/testing.hpp @@ -0,0 +1,20 @@ +/* + * Copyright 2024 NWChemEx-Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once +#include "../helpers.hpp" +#include "../inputs.hpp" +#include "eigen_buffers.hpp" \ No newline at end of file