12 changes: 11 additions & 1 deletion CMakeLists.txt
@@ -84,7 +84,17 @@ cmaize_find_or_build_optional_dependency(
CMAKE_ARGS BUILD_TESTING=OFF
ENABLE_EIGEN_SUPPORT=ON
)
set(DEPENDENCIES utilities parallelzone Boost::boost eigen sigma)

cmaize_find_or_build_dependency(
WeaklyTypedFloat
NAME WeaklyTypedFloat
URL https://www.github.com/NWChemEx/weaklytypedfloat
VERSION master
BUILD_TARGET wtf
FIND_TARGET nwx::wtf
)

set(DEPENDENCIES utilities parallelzone Boost::boost eigen sigma wtf)

if("${ENABLE_CUTENSOR}")
include(cmake/FindcuTENSOR.cmake)
64 changes: 64 additions & 0 deletions include/tensorwrapper/buffer/mdbuffer.hpp
@@ -0,0 +1,64 @@
/*
* Copyright 2025 NWChemEx-Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#pragma once
#include <tensorwrapper/types/mdbuffer_traits.hpp>
#include <vector>

namespace tensorwrapper::buffer {

/** @brief A multidimensional (MD) buffer.
*
* This class is a dense multidimensional buffer of floating-point values.
*/
class MDBuffer {
private:
using traits_type = types::ClassTraits<MDBuffer>;

public:
/// Add types to public API
///@{
using buffer_type = typename traits_type::buffer_type;
using pimpl_type = typename traits_type::pimpl_type;
using pimpl_pointer = typename traits_type::pimpl_pointer;
using rank_type = typename traits_type::rank_type;
using shape_type = typename traits_type::shape_type;
///@}

MDBuffer() noexcept;

template<typename T>
MDBuffer(shape_type shape, std::vector<T> elements) :
  MDBuffer(std::move(shape), buffer_type(std::move(elements))) {}

MDBuffer(shape_type shape, buffer_type buffer);

rank_type rank() const;

private:
explicit MDBuffer(pimpl_pointer pimpl) noexcept;

bool has_pimpl_() const noexcept;

void assert_pimpl_() const;

pimpl_type& pimpl_();
const pimpl_type& pimpl_() const;

pimpl_pointer m_pimpl_;
};

} // namespace tensorwrapper::buffer
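
A minimal usage sketch for the class above, assuming shape::Smooth can be constructed from a list of extents (its constructors are not part of this diff); the vector-based constructor and rank() are the ones declared here.

#include <tensorwrapper/buffer/mdbuffer.hpp>
#include <vector>

int main() {
    using tensorwrapper::buffer::MDBuffer;

    // Assumed: shape::Smooth can be built from a list of extents (2 x 3).
    MDBuffer::shape_type shape{2, 3};

    // Six row-major elements; the templated constructor delegates to the
    // (shape, buffer_type) overload.
    MDBuffer buf(shape, std::vector<double>{1.0, 2.0, 3.0, 4.0, 5.0, 6.0});

    return buf.rank() == 2 ? 0 : 1;
}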
36 changes: 36 additions & 0 deletions include/tensorwrapper/forward_declarations.hpp
@@ -0,0 +1,36 @@
/*
* Copyright 2025 NWChemEx-Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#pragma once

namespace tensorwrapper {

namespace buffer {
namespace detail_ {
class MDBufferPIMPL;
}

class MDBuffer;

} // namespace buffer

namespace shape {

class Smooth;

} // namespace shape

} // namespace tensorwrapper
30 changes: 30 additions & 0 deletions include/tensorwrapper/symmetry/permutation.hpp
@@ -161,6 +161,36 @@ class Permutation : public Operation {
*/
mode_index_type size() const noexcept { return m_cycles_.size(); }

/** @brief Permutes the objects in @p input according to *this.
*
* @tparam T The type of a container-like object. It must support size(),
* and operator[].
*
* @param[in] input The object to permute.
*
* @return A copy of @p input with its elements permuted according to
* *this.
*
* @throw std::runtime_error if the size of @p input does not match the
* rank of *this. Strong throw guarantee.
*/
template<typename T>
T apply(T input) const {
if(input.size() != m_rank_)
throw std::runtime_error(
"Input size does not match permutation rank");
for(const auto& cycle : m_cycles_) {
if(cycle.size() < 2) continue;
T buffer = input;
for(std::size_t i = 0; i < cycle.size(); ++i) {
auto from = cycle[i];
auto to = cycle[(i + 1) % cycle.size()];
input[to] = buffer[from];
}
}
return input;
}

// -------------------------------------------------------------------------
// -- Utility methods
// -------------------------------------------------------------------------
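A short usage sketch for the new apply() overload. The namespace is inferred from the file path and the brace construction from a single cycle is an assumption (the Permutation constructors sit outside this hunk); the expected result follows directly from the loop above, where the cycle (0 1 2) sends the element at index 0 to index 1, 1 to 2, and 2 to 0.

#include <string>
#include <tensorwrapper/symmetry/permutation.hpp>
#include <vector>

int main() {
    using tensorwrapper::symmetry::Permutation;

    // Assumed constructor: a rank-3 permutation made of the single cycle (0 1 2).
    Permutation p{{0, 1, 2}};

    std::vector<std::string> modes{"i", "j", "k"};
    auto permuted = p.apply(modes); // yields {"k", "i", "j"}

    return permuted.front() == "k" ? 0 : 1;
}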
1 change: 1 addition & 0 deletions include/tensorwrapper/types/floating_point.hpp
@@ -15,6 +15,7 @@
*/

#pragma once
#include <cmath>
#include <tuple>
#ifdef ENABLE_SIGMA
#include <sigma/sigma.hpp>
52 changes: 52 additions & 0 deletions include/tensorwrapper/types/mdbuffer_traits.hpp
@@ -0,0 +1,52 @@
/*
* Copyright 2025 NWChemEx-Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#pragma once
#include <memory>
#include <tensorwrapper/forward_declarations.hpp>
#include <tensorwrapper/types/class_traits.hpp>
#include <tensorwrapper/types/shape_traits.hpp>
#include <wtf/wtf.hpp>

namespace tensorwrapper::types {

struct MDBufferTraitsCommon {
using value_type = wtf::fp::Float;
using const_reference = wtf::fp::FloatView<const value_type>;
using buffer_type = wtf::buffer::FloatBuffer;
using const_buffer_view = wtf::buffer::BufferView<const value_type>;
using shape_type = shape::Smooth;
using rank_type = typename shape_type::rank_type;
using pimpl_type = tensorwrapper::buffer::detail_::MDBufferPIMPL;
using pimpl_pointer = std::unique_ptr<pimpl_type>;
};

template<>
struct ClassTraits<tensorwrapper::buffer::MDBuffer>
: public MDBufferTraitsCommon {
using reference = wtf::fp::FloatView<value_type>;

using buffer_view = wtf::buffer::BufferView<value_type>;
using const_buffer_view = wtf::buffer::BufferView<const value_type>;
};

template<>
struct ClassTraits<const tensorwrapper::buffer::MDBuffer>
: public MDBufferTraitsCommon {
using reference = wtf::fp::FloatView<const value_type>;
using buffer_view = wtf::buffer::BufferView<const value_type>;
};

} // namespace tensorwrapper::types
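
A small sketch of what the const and non-const specializations above buy a consumer: the const-qualified key maps the view aliases to read-only wtf views, which can be checked at compile time using only the aliases defined in this header.

#include <tensorwrapper/types/mdbuffer_traits.hpp>
#include <type_traits>

namespace tw = tensorwrapper;

// Mutable views come from the non-const specialization...
using mutable_view = tw::types::ClassTraits<tw::buffer::MDBuffer>::buffer_view;
// ...read-only views from the const one.
using readonly_view =
  tw::types::ClassTraits<const tw::buffer::MDBuffer>::buffer_view;

static_assert(!std::is_same_v<mutable_view, readonly_view>,
              "const MDBuffer traits expose read-only buffer views");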
36 changes: 36 additions & 0 deletions include/tensorwrapper/types/shape_traits.hpp
@@ -0,0 +1,36 @@
/*
* Copyright 2025 NWChemEx-Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#pragma once
#include <cstddef>
#include <tensorwrapper/forward_declarations.hpp>
#include <tensorwrapper/types/class_traits.hpp>

namespace tensorwrapper::types {

struct ShapeTraitsCommon {
using size_type = std::size_t;
using rank_type = unsigned short;
};

template<>
struct ClassTraits<tensorwrapper::shape::Smooth> : public ShapeTraitsCommon {};

template<>
struct ClassTraits<const tensorwrapper::shape::Smooth>
: public ShapeTraitsCommon {};

} // namespace tensorwrapper::types
49 changes: 49 additions & 0 deletions src/tensorwrapper/backends/cutensor/cuda_tensor.cpp
@@ -0,0 +1,49 @@
/*
* Copyright 2025 NWChemEx-Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "cuda_tensor.hpp"

#ifdef ENABLE_CUTENSOR
#include "eigen_tensor.cuh"
#endif

namespace tensorwrapper::backends::cutensor {

#define TPARAMS template<typename FloatType>
#define CUDA_TENSOR CUDATensor<FloatType>

TPARAMS
void CUDA_TENSOR::contraction_assignment(label_type this_label,
label_type lhs_label,
label_type rhs_label,
const_my_reference lhs,
const_my_reference rhs) {
#ifdef ENABLE_CUTENSOR
cutensor_contraction<my_type>(this_label, lhs_label, rhs_label, lhs, rhs,
*this);
#else
throw std::runtime_error(
"cuTENSOR backend not enabled. Recompile with -DENABLE_CUTENSOR.");
#endif
}

#undef CUDA_TENSOR
#undef TPARAMS

template class CUDATensor<float>;
template class CUDATensor<double>;

} // namespace tensorwrapper::backends::cutensor