This commit is contained in:
2026-03-23 20:54:41 +08:00
commit e13b3650e9
4596 changed files with 1015768 additions and 0 deletions

View File

@@ -0,0 +1,345 @@
//
// Copyright (c) 2018-2019, Cem Bassoy, cem.bassoy@gmail.com
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// The authors gratefully acknowledge the support of
// Fraunhofer IOSB, Ettlingen, Germany
//
#ifndef _BOOST_UBLAS_TENSOR_ALGORITHMS_HPP
#define _BOOST_UBLAS_TENSOR_ALGORITHMS_HPP
#include <complex>
#include <functional>
#include <stdexcept>
#include <type_traits>
namespace boost {
namespace numeric {
namespace ublas {
/** @brief Copies a tensor to another tensor with different layouts
 *
 * Implements C[i1,i2,...,ip] = A[i1,i2,...,ip]
 *
 * @param[in]  p  rank of input and output tensor
 * @param[in]  n  pointer to the extents of input or output tensor of length p
 * @param[out] c  pointer to the output tensor
 * @param[in]  wc pointer to the strides of output tensor c
 * @param[in]  a  pointer to the input tensor
 * @param[in]  wa pointer to the strides of input tensor a
 *
 * @throws std::length_error if any pointer argument is null (and p > 0)
 */
template <class PointerOut, class PointerIn, class SizeType>
void copy(const SizeType p, SizeType const*const n,
          PointerOut c, SizeType const*const wc,
          PointerIn a, SizeType const*const wa)
{
    // Fixed: logical && instead of bitwise & — expresses intent and short-circuits.
    static_assert( std::is_pointer<PointerOut>::value && std::is_pointer<PointerIn>::value,
                   "Static error in boost::numeric::ublas::copy: Argument types for pointers are not pointer types.");
    // Rank zero: nothing to copy; pointers may legitimately be unused.
    if( p == 0 )
        return;
    if(c == nullptr || a == nullptr)
        throw std::length_error("Error in boost::numeric::ublas::copy: Pointers shall not be null pointers.");
    if(wc == nullptr || wa == nullptr)
        throw std::length_error("Error in boost::numeric::ublas::copy: Pointers shall not be null pointers.");
    if(n == nullptr)
        throw std::length_error("Error in boost::numeric::ublas::copy: Pointers shall not be null pointers.");
    // Recursive lambda: dimension r is iterated with stride wc[r]/wa[r]; the
    // innermost dimension (r == 0) performs the element-wise copy.
    std::function<void(SizeType r, PointerOut c, PointerIn a)> lambda;
    lambda = [&lambda, n, wc, wa](SizeType r, PointerOut c, PointerIn a)
    {
        if(r > 0)
            // Fixed: counter has type SizeType (was 0u, which silently truncates
            // extents larger than unsigned int).
            for(auto d = SizeType(0); d < n[r]; c += wc[r], a += wa[r], ++d)
                lambda(r-1, c, a );
        else
            for(auto d = SizeType(0); d < n[0]; c += wc[0], a += wa[0], ++d)
                *c = *a;
    };
    lambda( p-1, c, a );
}
/** @brief Copies a tensor to another tensor with different layouts applying a unary operation
 *
 * Implements C[i1,i2,...,ip] = op ( A[i1,i2,...,ip] )
 *
 * @param[in]  p  rank of input and output tensor
 * @param[in]  n  pointer to the extents of input or output tensor of length p
 * @param[out] c  pointer to the output tensor
 * @param[in]  wc pointer to the strides of output tensor c
 * @param[in]  a  pointer to the input tensor
 * @param[in]  wa pointer to the strides of input tensor a
 * @param[in]  op unary operation
 *
 * @throws std::length_error if any pointer argument is null (and p > 0)
 */
template <class PointerOut, class PointerIn, class SizeType, class UnaryOp>
void transform(const SizeType p,
               SizeType const*const n,
               PointerOut c, SizeType const*const wc,
               PointerIn a, SizeType const*const wa,
               UnaryOp op)
{
    // Fixed: logical && instead of bitwise & — expresses intent and short-circuits.
    static_assert( std::is_pointer<PointerOut>::value && std::is_pointer<PointerIn>::value,
                   "Static error in boost::numeric::ublas::transform: Argument types for pointers are not pointer types.");
    // Rank zero: nothing to transform.
    if( p == 0 )
        return;
    if(c == nullptr || a == nullptr)
        throw std::length_error("Error in boost::numeric::ublas::transform: Pointers shall not be null pointers.");
    if(wc == nullptr || wa == nullptr)
        throw std::length_error("Error in boost::numeric::ublas::transform: Pointers shall not be null pointers.");
    if(n == nullptr)
        throw std::length_error("Error in boost::numeric::ublas::transform: Pointers shall not be null pointers.");
    // Recursive lambda over dimensions; innermost dimension applies op element-wise.
    std::function<void(SizeType r, PointerOut c, PointerIn a)> lambda;
    lambda = [&lambda, n, wc, wa, op](SizeType r, PointerOut c, PointerIn a)
    {
        if(r > 0)
            // Fixed: counter has type SizeType (was 0u, truncating 64-bit extents).
            for(auto d = SizeType(0); d < n[r]; c += wc[r], a += wa[r], ++d)
                lambda(r-1, c, a);
        else
            for(auto d = SizeType(0); d < n[0]; c += wc[0], a += wa[0], ++d)
                *c = op(*a);
    };
    lambda( p-1, c, a );
}
/** @brief Performs a reduce operation with all elements of the tensor and an initial value
 *
 * Implements k = sum_{i1,..,ip} A[i1,i2,...,ip]
 *
 * @param[in] p rank of the input tensor
 * @param[in] n pointer to the extents of the input tensor of length p
 * @param[in] a pointer to the first input tensor
 * @param[in] w pointer to the strides of input tensor a
 * @param[in] k initial value of the accumulation
 *
 * @returns the initial value k plus the sum of all tensor elements
 * @throws std::length_error if any pointer argument is null (and p > 0)
 */
template <class PointerIn, class ValueType, class SizeType>
ValueType accumulate(SizeType const p, SizeType const*const n,
                     PointerIn a, SizeType const*const w,
                     ValueType k)
{
    // Fixed: diagnostics previously referred to 'transform' instead of 'accumulate'.
    static_assert(std::is_pointer<PointerIn>::value,
                  "Static error in boost::numeric::ublas::accumulate: Argument types for pointers are not pointer types.");
    // Rank zero: reduction over an empty index set yields the initial value.
    if( p == 0 )
        return k;
    if(a == nullptr)
        throw std::length_error("Error in boost::numeric::ublas::accumulate: Pointers shall not be null pointers.");
    if(w == nullptr)
        throw std::length_error("Error in boost::numeric::ublas::accumulate: Pointers shall not be null pointers.");
    if(n == nullptr)
        throw std::length_error("Error in boost::numeric::ublas::accumulate: Pointers shall not be null pointers.");
    // Recursive lambda: threads the accumulator through every dimension.
    std::function<ValueType(SizeType r, PointerIn a, ValueType k)> lambda;
    lambda = [&lambda, n, w](SizeType r, PointerIn a, ValueType k)
    {
        if(r > 0u)
            // Fixed: counter has type SizeType (was 0u, truncating 64-bit extents).
            for(auto d = SizeType(0); d < n[r]; a += w[r], ++d)
                k = lambda(r-1, a, k);
        else
            for(auto d = SizeType(0); d < n[0]; a += w[0], ++d)
                k += *a;
        return k;
    };
    return lambda( p-1, a, k );
}
/** @brief Performs a reduce operation with all elements of the tensor and an initial value
 *
 * Implements k = op ( k , A[i1,i2,...,ip] ), for all ir
 *
 * @param[in] p  rank of the input tensor
 * @param[in] n  pointer to the extents of the input tensor of length p
 * @param[in] a  pointer to the first input tensor
 * @param[in] w  pointer to the strides of input tensor a
 * @param[in] k  initial value of the accumulation
 * @param[in] op binary operation combining the accumulator and an element
 *
 * @returns the accumulated value
 * @throws std::length_error if any pointer argument is null (and p > 0)
 */
template <class PointerIn, class ValueType, class SizeType, class BinaryOp>
ValueType accumulate(SizeType const p, SizeType const*const n,
                     PointerIn a, SizeType const*const w,
                     ValueType k, BinaryOp op)
{
    // Fixed: diagnostics previously referred to 'transform' instead of 'accumulate'.
    static_assert(std::is_pointer<PointerIn>::value,
                  "Static error in boost::numeric::ublas::accumulate: Argument types for pointers are not pointer types.");
    // Rank zero: reduction over an empty index set yields the initial value.
    if( p == 0 )
        return k;
    if(a == nullptr)
        throw std::length_error("Error in boost::numeric::ublas::accumulate: Pointers shall not be null pointers.");
    if(w == nullptr)
        throw std::length_error("Error in boost::numeric::ublas::accumulate: Pointers shall not be null pointers.");
    if(n == nullptr)
        throw std::length_error("Error in boost::numeric::ublas::accumulate: Pointers shall not be null pointers.");
    // Recursive lambda: folds op over every element, innermost dimension first.
    std::function<ValueType(SizeType r, PointerIn a, ValueType k)> lambda;
    lambda = [&lambda, n, w, op](SizeType r, PointerIn a, ValueType k)
    {
        if(r > 0u)
            // Fixed: counter has type SizeType (was 0u, truncating 64-bit extents).
            for(auto d = SizeType(0); d < n[r]; a += w[r], ++d)
                k = lambda(r-1, a, k);
        else
            for(auto d = SizeType(0); d < n[0]; a += w[0], ++d)
                k = op ( k, *a );
        return k;
    };
    return lambda( p-1, a, k );
}
/** @brief Transposes a tensor
 *
 * Implements C[tau[i1],tau[i2],...,tau[ip]] = A[i1,i2,...,ip]
 *
 * @note is used in function trans
 *
 * @param[in]  p  rank of input and output tensor
 * @param[in]  na pointer to the extents of the input tensor a of length p
 * @param[in]  pi pointer to a one-based permutation tuple of length p
 * @param[out] c  pointer to the output tensor
 * @param[in]  wc pointer to the strides of output tensor c
 * @param[in]  a  pointer to the input tensor
 * @param[in]  wa pointer to the strides of input tensor a
 *
 * @throws std::runtime_error / std::length_error if a pointer argument is null
 */
template <class PointerOut, class PointerIn, class SizeType>
void trans( SizeType const p, SizeType const*const na, SizeType const*const pi,
            PointerOut c, SizeType const*const wc,
            PointerIn a, SizeType const*const wa)
{
    // Fixed: logical && instead of bitwise & — expresses intent and short-circuits.
    static_assert( std::is_pointer<PointerOut>::value && std::is_pointer<PointerIn>::value,
                   "Static error in boost::numeric::ublas::trans: Argument types for pointers are not pointer types.");
    // Scalars and vectors need no transposition.
    if( p < 2)
        return;
    if(c == nullptr || a == nullptr)
        throw std::runtime_error("Error in boost::numeric::ublas::trans: Pointers shall not be null pointers.");
    if(na == nullptr)
        throw std::runtime_error("Error in boost::numeric::ublas::trans: Pointers shall not be null.");
    if(wc == nullptr || wa == nullptr)
        throw std::length_error("Error in boost::numeric::ublas::trans: Pointers shall not be null pointers.");
    // Fixed: removed a duplicated (dead) na == nullptr check — na is already
    // validated above, so the second check could never fire.
    if(pi == nullptr)
        throw std::length_error("Error in boost::numeric::ublas::trans: Pointers shall not be null pointers.");
    // Recursive lambda walks dimension r of A while scattering into C through
    // the permuted stride wc[pi[r]-1] (pi is one-based).
    std::function<void(SizeType r, PointerOut c, PointerIn a)> lambda;
    lambda = [&lambda, na, wc, wa, pi](SizeType r, PointerOut c, PointerIn a)
    {
        if(r > 0)
            // Fixed: counter has type SizeType (was 0u, truncating 64-bit extents).
            for(auto d = SizeType(0); d < na[r]; c += wc[pi[r]-1], a += wa[r], ++d)
                lambda(r-1, c, a);
        else
            for(auto d = SizeType(0); d < na[0]; c += wc[pi[0]-1], a += wa[0], ++d)
                *c = *a;
    };
    lambda( p-1, c, a );
}
/** @brief Transposes a complex-valued tensor, conjugating every element
 *
 * Implements C[tau[i1],tau[i2],...,tau[ip]] = conj( A[i1,i2,...,ip] )
 *
 * @note is used in function trans
 *
 * @param[in]  p  rank of input and output tensor
 * @param[in]  na pointer to the extents of the input tensor a of length p
 * @param[in]  pi pointer to a one-based permutation tuple of length p
 * @param[out] c  pointer to the output tensor
 * @param[in]  wc pointer to the strides of output tensor c
 * @param[in]  a  pointer to the input tensor
 * @param[in]  wa pointer to the strides of input tensor a
 */
template <class ValueType, class SizeType>
void trans( SizeType const p,
            SizeType const*const na,
            SizeType const*const pi,
            std::complex<ValueType>* c, SizeType const*const wc,
            std::complex<ValueType>* a, SizeType const*const wa)
{
    // Rank 0 and 1 tensors require no transposition.
    if( p < 2)
        return;
    // All six pointers share the same failure mode and message, so they are
    // validated with a single combined condition.
    if(c == nullptr || a == nullptr || wc == nullptr || wa == nullptr || na == nullptr || pi == nullptr)
        throw std::length_error("Error in boost::numeric::ublas::trans: Pointers shall not be null pointers.");
    // Recursive descent over the dimensions: the output pointer advances with
    // the permuted stride wc[pi[level]-1] (pi is one-based), the input pointer
    // with its natural stride. Level 0 conjugates and stores each element.
    std::function<void(SizeType, std::complex<ValueType>*, std::complex<ValueType>*)> recurse;
    recurse = [&recurse, na, wc, wa, pi](SizeType level, std::complex<ValueType>* out, std::complex<ValueType>* in)
    {
        if(level == 0)
        {
            for(auto i = 0u; i < na[0]; out += wc[pi[0]-1], in += wa[0], ++i)
                *out = std::conj(*in);
        }
        else
        {
            for(auto i = 0u; i < na[level]; out += wc[pi[level]-1], in += wa[level], ++i)
                recurse(level-1, out, in);
        }
    };
    recurse( p-1, c, a );
}
}
}
}
#endif

View File

@@ -0,0 +1,181 @@
//
// Copyright (c) 2018-2019, Cem Bassoy, cem.bassoy@gmail.com
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// The authors gratefully acknowledge the support of
// Fraunhofer IOSB, Ettlingen, Germany
//
#ifndef BOOST_UBLAS_TENSOR_EXPRESSIONS_HPP
#define BOOST_UBLAS_TENSOR_EXPRESSIONS_HPP
#include <cstddef>
#include <boost/numeric/ublas/expression_types.hpp>
namespace boost {
namespace numeric {
namespace ublas {
template<class element_type, class storage_format, class storage_type>
class tensor;
template<class size_type>
class basic_extents;
//TODO: put in fwd.hpp
struct tensor_tag {};
}
}
}
namespace boost {
namespace numeric {
namespace ublas {
namespace detail {
/** @brief base class for tensor expressions
 *
 * \note implements crtp - no use of virtual function calls
 *
 * \tparam T type of the tensor
 * \tparam E type of the derived expression (crtp)
 *
 **/
template<class T, class E>
struct tensor_expression
: public ublas_expression<E>
{
// static const unsigned complexity = 0;
using expression_type = E;
using type_category = tensor_tag;
using tensor_type = T;
// Downcast to the concrete expression type. Safe under CRTP because E is
// required to derive from tensor_expression<T,E>.
BOOST_UBLAS_INLINE
auto const& operator()() const { return *static_cast<const expression_type*> (this); }
protected :
// Expressions are neither copyable nor assignable; they are created in place
// by the make_*_tensor_expression helpers and consumed immediately.
explicit tensor_expression() = default;
tensor_expression(const tensor_expression&) = delete;
tensor_expression& operator=(const tensor_expression&) = delete;
};
/** @brief Proxy class for binary tensor expressions
 *
 * Holds references to the left and right sub-expressions and applies the
 * binary operation lazily, element by element, on access.
 *
 * \tparam T  type of the tensor
 * \tparam EL type of the left sub-expression
 * \tparam ER type of the right sub-expression
 * \tparam OP type of the binary operation
 */
template<class T, class EL, class ER, class OP>
struct binary_tensor_expression
: public tensor_expression <T, binary_tensor_expression<T,EL,ER,OP>>
{
using self_type = binary_tensor_expression<T,EL,ER,OP>;
using tensor_type = T;
using binary_operation = OP;
using expression_type_left = EL;
using expression_type_right = ER;
using derived_type = tensor_expression <tensor_type,self_type>;
using size_type = typename tensor_type::size_type;
explicit binary_tensor_expression(expression_type_left const& l, expression_type_right const& r, binary_operation o)
: el(l) , er(r) , op(o) {}
binary_tensor_expression() = delete;
binary_tensor_expression(const binary_tensor_expression& l) = delete;
// Move construction rebinds the references and copies the operation; the
// sub-expressions themselves are not owned by this proxy.
binary_tensor_expression(binary_tensor_expression&& l)
: el(l.el), er(l.er), op(l.op) {}
// Element access: applies op to the i-th elements of both children.
BOOST_UBLAS_INLINE
decltype(auto) operator()(size_type i) const { return op(el(i), er(i)); }
// NOTE(review): el and er are references to caller-owned expressions — this
// proxy must not outlive the expressions it refers to; verify at call sites.
expression_type_left const& el;
expression_type_right const& er;
binary_operation op;
};
/// @brief helper functions that deduce and instantiate the binary expression proxy type
template<class T, class EL, class ER, class OP>
auto make_binary_tensor_expression( tensor_expression<T,EL> const& el, tensor_expression<T,ER> const& er, OP op)
{
    using expression = binary_tensor_expression<T,EL,ER,OP>;
    return expression( el(), er(), op );
}
/// @brief overload for a matrix expression on the left-hand side
template<class T, class EL, class ER, class OP>
auto make_binary_tensor_expression( matrix_expression<EL> const& el, tensor_expression<T,ER> const& er, OP op)
{
    using expression = binary_tensor_expression<T,EL,ER,OP>;
    return expression( el(), er(), op );
}
/// @brief overload for a matrix expression on the right-hand side
template<class T, class EL, class ER, class OP>
auto make_binary_tensor_expression( tensor_expression<T,EL> const& el, matrix_expression<ER> const& er, OP op)
{
    using expression = binary_tensor_expression<T,EL,ER,OP>;
    return expression( el(), er(), op );
}
/// @brief overload for a vector expression on the left-hand side
template<class T, class EL, class ER, class OP>
auto make_binary_tensor_expression( vector_expression<EL> const& el, tensor_expression<T,ER> const& er, OP op)
{
    using expression = binary_tensor_expression<T,EL,ER,OP>;
    return expression( el(), er(), op );
}
/// @brief overload for a vector expression on the right-hand side
template<class T, class EL, class ER, class OP>
auto make_binary_tensor_expression( tensor_expression<T,EL> const& el, vector_expression<ER> const& er, OP op)
{
    using expression = binary_tensor_expression<T,EL,ER,OP>;
    return expression( el(), er(), op );
}
/** @brief Proxy class for unary tensor expressions
 *
 * Holds a reference to a sub-expression and applies the unary operation
 * lazily, element by element, on access.
 *
 * \tparam T  type of the tensor
 * \tparam E  type of the sub-expression
 * \tparam OP type of the unary operation
 */
template<class T, class E, class OP>
struct unary_tensor_expression
: public tensor_expression <T, unary_tensor_expression<T,E,OP>>
{
    using self_type = unary_tensor_expression<T,E,OP>;
    using tensor_type = T;
    using expression_type = E;
    using derived_type = tensor_expression <T, unary_tensor_expression<T,E,OP>>;
    using size_type = typename tensor_type::size_type;
    explicit unary_tensor_expression(E const& ee, OP o) : e(ee) , op(o) {}
    unary_tensor_expression() = delete;
    unary_tensor_expression(const unary_tensor_expression& l) = delete;
    // Fixed: the move constructor previously initialized the operation with
    // `op(op.l)` — a member access on the operation itself, which is ill-formed
    // once instantiated. It must copy the operation from the source object `l`.
    unary_tensor_expression(unary_tensor_expression&& l)
    : e(l.e), op(l.op) {}
    // Element access: applies op to the i-th element of the child expression.
    BOOST_UBLAS_INLINE
    decltype(auto) operator()(size_type i) const { return op(e(i)); }
    // NOTE(review): e is a reference to a caller-owned expression — this proxy
    // must not outlive it; verify at call sites.
    E const& e;
    OP op;
};
// \brief helper functions that deduce and instantiate the unary expression proxy type
template<class T, class E, class OP>
auto make_unary_tensor_expression( tensor_expression<T,E> const& e, OP op)
{
    using expression = unary_tensor_expression<T,E,OP>;
    return expression( e(), op );
}
// \brief overload for a matrix expression operand
template<class T, class E, class OP>
auto make_unary_tensor_expression( matrix_expression<E> const& e, OP op)
{
    using expression = unary_tensor_expression<T,E,OP>;
    return expression( e(), op );
}
// \brief overload for a vector expression operand
template<class T, class E, class OP>
auto make_unary_tensor_expression( vector_expression<E> const& e, OP op)
{
    using expression = unary_tensor_expression<T,E,OP>;
    return expression( e(), op );
}
}
}
}
}
#endif

View File

@@ -0,0 +1,288 @@
//
// Copyright (c) 2018-2019, Cem Bassoy, cem.bassoy@gmail.com
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// The authors gratefully acknowledge the support of
// Fraunhofer IOSB, Ettlingen, Germany
//
#ifndef _BOOST_UBLAS_TENSOR_EXPRESSIONS_EVALUATION_HPP_
#define _BOOST_UBLAS_TENSOR_EXPRESSIONS_EVALUATION_HPP_
#include <type_traits>
#include <stdexcept>
namespace boost::numeric::ublas {
template<class element_type, class storage_format, class storage_type>
class tensor;
template<class size_type>
class basic_extents;
}
namespace boost::numeric::ublas::detail {
template<class T, class D>
struct tensor_expression;
template<class T, class EL, class ER, class OP>
struct binary_tensor_expression;
template<class T, class E, class OP>
struct unary_tensor_expression;
}
namespace boost::numeric::ublas::detail {
// Compile-time query: does the expression type E contain the tensor type T?
// Primary template: an arbitrary type does not contain a tensor.
template<class T, class E>
struct has_tensor_types
{ static constexpr bool value = false; };
// A tensor trivially contains itself.
template<class T>
struct has_tensor_types<T,T>
{ static constexpr bool value = true; };
// A tensor expression contains T if its derived expression D is T or contains T.
template<class T, class D>
struct has_tensor_types<T, tensor_expression<T,D>>
{ static constexpr bool value = std::is_same<T,D>::value || has_tensor_types<T,D>::value; };
// A binary expression contains T if either child is T or contains T.
template<class T, class EL, class ER, class OP>
struct has_tensor_types<T, binary_tensor_expression<T,EL,ER,OP>>
{ static constexpr bool value = std::is_same<T,EL>::value || std::is_same<T,ER>::value || has_tensor_types<T,EL>::value || has_tensor_types<T,ER>::value; };
// A unary expression contains T if its child is T or contains T.
template<class T, class E, class OP>
struct has_tensor_types<T, unary_tensor_expression<T,E,OP>>
{ static constexpr bool value = std::is_same<T,E>::value || has_tensor_types<T,E>::value; };
} // namespace boost::numeric::ublas::detail
namespace boost::numeric::ublas::detail {
/** @brief Retrieves extents of the tensor
 *
 * @param t tensor whose extents are returned
 * @returns the extents object of the tensor
 */
template<class T, class F, class A>
auto retrieve_extents(tensor<T,F,A> const& t)
{
return t.extents();
}
/** @brief Retrieves extents of the tensor expression
 *
 * @note tensor expression must be a binary tree with at least one tensor type
 *
 * @returns extents of the child expression if it is a tensor or extents of one child of its child.
 */
template<class T, class D>
auto retrieve_extents(tensor_expression<T,D> const& expr)
{
static_assert(detail::has_tensor_types<T,tensor_expression<T,D>>::value,
"Error in boost::numeric::ublas::detail::retrieve_extents: Expression to evaluate should contain tensors.");
// CRTP downcast to the concrete derived expression type.
auto const& cast_expr = static_cast<D const&>(expr);
// If the expression is the tensor itself, read its extents directly;
// otherwise recurse into the concrete expression.
if constexpr ( std::is_same<T,D>::value )
return cast_expr.extents();
else
return retrieve_extents(cast_expr);
}
/** @brief Retrieves extents of the binary tensor expression
 *
 * @note tensor expression must be a binary tree with at least one tensor type
 *
 * @returns extents of the (left and if necessary then right) child expression if it is a tensor or extents of a child of its (left and if necessary then right) child.
 */
template<class T, class EL, class ER, class OP>
auto retrieve_extents(binary_tensor_expression<T,EL,ER,OP> const& expr)
{
	static_assert(detail::has_tensor_types<T,binary_tensor_expression<T,EL,ER,OP>>::value,
	              "Error in boost::numeric::ublas::detail::retrieve_extents: Expression to evaluate should contain tensors.");
	// Fixed: the second branch was a stand-alone `if constexpr`, so it was not
	// part of the discarded-statement chain — when T == EL the compiler still
	// instantiated the remaining branches. A single `else if constexpr` chain
	// (matching the unary overload) discards all non-taken branches.
	if constexpr ( std::is_same<T,EL>::value )
		return expr.el.extents();
	else if constexpr ( std::is_same<T,ER>::value )
		return expr.er.extents();
	else if constexpr ( detail::has_tensor_types<T,EL>::value )
		return retrieve_extents(expr.el);
	else if constexpr ( detail::has_tensor_types<T,ER>::value )
		return retrieve_extents(expr.er);
}
/** @brief Retrieves extents of the unary tensor expression
 *
 * @note tensor expression must be a binary tree with at least one tensor type
 *
 * @returns extents of the child expression if it is a tensor or extents of a child of its child.
 */
template<class T, class E, class OP>
auto retrieve_extents(unary_tensor_expression<T,E,OP> const& expr)
{
static_assert(detail::has_tensor_types<T,unary_tensor_expression<T,E,OP>>::value,
"Error in boost::numeric::ublas::detail::retrieve_extents: Expression to evaluate should contain tensors.");
// If the child is the tensor itself, read its extents directly; otherwise recurse.
if constexpr ( std::is_same<T,E>::value )
return expr.e.extents();
else if constexpr ( detail::has_tensor_types<T,E>::value )
return retrieve_extents(expr.e);
}
} // namespace boost::numeric::ublas::detail
///////////////
namespace boost::numeric::ublas::detail {
// Returns true if the extents of tensor t equal the given extents.
template<class T, class F, class A, class S>
auto all_extents_equal(tensor<T,F,A> const& t, basic_extents<S> const& extents)
{
return extents == t.extents();
}
// Returns true if every tensor referenced inside the expression has the given extents.
template<class T, class D, class S>
auto all_extents_equal(tensor_expression<T,D> const& expr, basic_extents<S> const& extents)
{
static_assert(detail::has_tensor_types<T,tensor_expression<T,D>>::value,
"Error in boost::numeric::ublas::detail::all_extents_equal: Expression to evaluate should contain tensors.");
// CRTP downcast to the concrete derived expression type.
auto const& cast_expr = static_cast<D const&>(expr);
// Check the expression itself if it is a tensor ...
if constexpr ( std::is_same<T,D>::value )
if( extents != cast_expr.extents() )
return false;
// ... and recurse into its children if it contains tensors.
if constexpr ( detail::has_tensor_types<T,D>::value )
if ( !all_extents_equal(cast_expr, extents))
return false;
return true;
}
// Returns true if every tensor referenced inside the binary expression has the given extents.
template<class T, class EL, class ER, class OP, class S>
auto all_extents_equal(binary_tensor_expression<T,EL,ER,OP> const& expr, basic_extents<S> const& extents)
{
static_assert(detail::has_tensor_types<T,binary_tensor_expression<T,EL,ER,OP>>::value,
"Error in boost::numeric::ublas::detail::all_extents_equal: Expression to evaluate should contain tensors.");
// Check each child that is itself a tensor ...
if constexpr ( std::is_same<T,EL>::value )
if(extents != expr.el.extents())
return false;
if constexpr ( std::is_same<T,ER>::value )
if(extents != expr.er.extents())
return false;
// ... and recurse into each child that contains tensors.
if constexpr ( detail::has_tensor_types<T,EL>::value )
if(!all_extents_equal(expr.el, extents))
return false;
if constexpr ( detail::has_tensor_types<T,ER>::value )
if(!all_extents_equal(expr.er, extents))
return false;
return true;
}
// Returns true if every tensor referenced inside the unary expression has the given extents.
template<class T, class E, class OP, class S>
auto all_extents_equal(unary_tensor_expression<T,E,OP> const& expr, basic_extents<S> const& extents)
{
static_assert(detail::has_tensor_types<T,unary_tensor_expression<T,E,OP>>::value,
"Error in boost::numeric::ublas::detail::all_extents_equal: Expression to evaluate should contain tensors.");
// Check the child if it is a tensor ...
if constexpr ( std::is_same<T,E>::value )
if(extents != expr.e.extents())
return false;
// ... and recurse into it if it contains tensors.
if constexpr ( detail::has_tensor_types<T,E>::value )
if(!all_extents_equal(expr.e, extents))
return false;
return true;
}
} // namespace boost::numeric::ublas::detail
namespace boost::numeric::ublas::detail {
/** @brief Evaluates expression for a tensor
 *
 * Assigns the results of the expression to the tensor.
 *
 * \note Checks if shape of the tensor matches those of all tensors within the expression.
 */
template<class tensor_type, class derived_type>
void eval(tensor_type& lhs, tensor_expression<tensor_type, derived_type> const& expr)
{
// The shape check is only compiled in when the expression actually references tensors.
if constexpr (detail::has_tensor_types<tensor_type, tensor_expression<tensor_type,derived_type> >::value )
if(!detail::all_extents_equal(expr, lhs.extents() ))
throw std::runtime_error("Error in boost::numeric::ublas::tensor: expression contains tensors with different shapes.");
// Element-wise assignment; iterations are independent, hence the parallel for.
#pragma omp parallel for
for(auto i = 0u; i < lhs.size(); ++i)
lhs(i) = expr()(i);
}
/** @brief Evaluates expression for a tensor
 *
 * Applies a binary function fn(lhs_element, expr_element) for every element —
 * used for compound assignments such as A += C (fn performs the combination).
 *
 * \note Checks if shape of the tensor matches those of all tensors within the expression.
 */
template<class tensor_type, class derived_type, class unary_fn>
void eval(tensor_type& lhs, tensor_expression<tensor_type, derived_type> const& expr, unary_fn const fn)
{
// The shape check is only compiled in when the expression actually references tensors.
if constexpr (detail::has_tensor_types< tensor_type, tensor_expression<tensor_type,derived_type> >::value )
if(!detail::all_extents_equal( expr, lhs.extents() ))
throw std::runtime_error("Error in boost::numeric::ublas::tensor: expression contains tensors with different shapes.");
// fn is invoked with (lhs element, expression element); iterations are independent.
#pragma omp parallel for
for(auto i = 0u; i < lhs.size(); ++i)
fn(lhs(i), expr()(i));
}
/** @brief Evaluates a unary function for every element of a tensor
 *
 * Applies fn to each element of lhs in place — used for operators such as A *= s.
 */
template<class tensor_type, class unary_fn>
void eval(tensor_type& lhs, unary_fn const fn)
{
// No shape check needed: only lhs is involved. Iterations are independent.
#pragma omp parallel for
for(auto i = 0u; i < lhs.size(); ++i)
fn(lhs(i));
}
}
#endif

View File

@@ -0,0 +1,335 @@
//
// Copyright (c) 2018-2019, Cem Bassoy, cem.bassoy@gmail.com
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// The authors gratefully acknowledge the support of
// Fraunhofer IOSB, Ettlingen, Germany
//
#ifndef BOOST_NUMERIC_UBLAS_TENSOR_EXTENTS_HPP
#define BOOST_NUMERIC_UBLAS_TENSOR_EXTENTS_HPP
#include <algorithm>
#include <initializer_list>
#include <limits>
#include <numeric>
#include <stdexcept>
#include <vector>
#include <cassert>
namespace boost {
namespace numeric {
namespace ublas {
/** @brief Template class for storing tensor extents with runtime variable size.
*
* Proxy template class of std::vector<int_type>.
*
*/
template<class int_type>
class basic_extents
{
// Extents must be unsigned integers: a dimension cannot be negative and a
// zero-valued dimension is rejected by valid().
static_assert( std::numeric_limits<typename std::vector<int_type>::value_type>::is_integer, "Static error in basic_layout: type must be of type integer.");
static_assert(!std::numeric_limits<typename std::vector<int_type>::value_type>::is_signed, "Static error in basic_layout: type must be of type unsigned integer.");
public:
using base_type = std::vector<int_type>;
using value_type = typename base_type::value_type;
using const_reference = typename base_type::const_reference;
using reference = typename base_type::reference;
using size_type = typename base_type::size_type;
using const_pointer = typename base_type::const_pointer;
using const_iterator = typename base_type::const_iterator;
/** @brief Default constructs basic_extents
 *
 * @code auto ex = basic_extents<unsigned>{};
 */
constexpr explicit basic_extents()
: _base{}
{
}
/** @brief Copy constructs basic_extents from a one-dimensional container
 *
 * @code auto ex = basic_extents<unsigned>( std::vector<unsigned>(3u,3u) );
 *
 * @note checks if size > 1 and all elements > 0
 *
 * @param b one-dimensional std::vector<int_type> container
 */
explicit basic_extents(base_type const& b)
: _base(b)
{
if (!this->valid()){
throw std::length_error("Error in basic_extents::basic_extents() : shape tuple is not a valid permutation: has zero elements.");
}
}
/** @brief Move constructs basic_extents from a one-dimensional container
 *
 * @code auto ex = basic_extents<unsigned>( std::vector<unsigned>(3u,3u) );
 *
 * @note checks if size > 1 and all elements > 0
 *
 * @param b one-dimensional container of type std::vector<int_type>
 */
explicit basic_extents(base_type && b)
: _base(std::move(b))
{
if (!this->valid()){
throw std::length_error("Error in basic_extents::basic_extents() : shape tuple is not a valid permutation: has zero elements.");
}
}
/** @brief Constructs basic_extents from an initializer list
 *
 * @code auto ex = basic_extents<unsigned>{3,2,4};
 *
 * @note checks if size > 1 and all elements > 0
 *
 * @param l one-dimensional list of type std::initializer<int_type>
 */
basic_extents(std::initializer_list<value_type> l)
: basic_extents( base_type(std::move(l)) )
{
}
/** @brief Constructs basic_extents from a range specified by two iterators
 *
 * @code auto ex = basic_extents<unsigned>(a.begin(), a.end());
 *
 * @note checks if size > 1 and all elements > 0
 *
 * @param first iterator pointing to the first element
 * @param last iterator pointing to the next position after the last element
 */
basic_extents(const_iterator first, const_iterator last)
: basic_extents ( base_type( first,last ) )
{
}
/** @brief Copy constructs basic_extents */
basic_extents(basic_extents const& l )
: _base(l._base)
{
}
/** @brief Move constructs basic_extents */
basic_extents(basic_extents && l ) noexcept
: _base(std::move(l._base))
{
}
~basic_extents() = default;
// Copy-and-swap assignment: parameter is taken by value, so a single
// operator handles both copy- and move-assignment and is strongly exception safe.
basic_extents& operator=(basic_extents other) noexcept
{
swap (*this, other);
return *this;
}
friend void swap(basic_extents& lhs, basic_extents& rhs) {
std::swap(lhs._base , rhs._base );
}
/** @brief Returns true if this has a scalar shape
 *
 * @returns true if (1,1,[1,...,1])
 */
bool is_scalar() const
{
return
_base.size() != 0 &&
std::all_of(_base.begin(), _base.end(),
[](const_reference a){ return a == 1;});
}
/** @brief Returns true if this has a vector shape
 *
 * @returns true if (1,n,[1,...,1]) or (n,1,[1,...,1]) with n > 1
 */
bool is_vector() const
{
if(_base.size() == 0){
return false;
}
if(_base.size() == 1){
return _base.at(0) > 1;
}
auto greater_one = [](const_reference a){ return a > 1;};
auto equal_one = [](const_reference a){ return a == 1;};
// Exactly one of the first two dimensions exceeds 1 and all remaining
// dimensions are singleton.
return
std::any_of(_base.begin(), _base.begin()+2, greater_one) &&
std::any_of(_base.begin(), _base.begin()+2, equal_one ) &&
std::all_of(_base.begin()+2, _base.end(), equal_one);
}
/** @brief Returns true if this has a matrix shape
 *
 * @returns true if (m,n,[1,...,1]) with m > 1 and n > 1
 */
bool is_matrix() const
{
if(_base.size() < 2){
return false;
}
auto greater_one = [](const_reference a){ return a > 1;};
auto equal_one = [](const_reference a){ return a == 1;};
return
std::all_of(_base.begin(), _base.begin()+2, greater_one) &&
std::all_of(_base.begin()+2, _base.end(), equal_one );
}
/** @brief Returns true if this is has a tensor shape
 *
 * @returns true if !empty() && !is_scalar() && !is_vector() && !is_matrix()
 */
bool is_tensor() const
{
if(_base.size() < 3){
return false;
}
auto greater_one = [](const_reference a){ return a > 1;};
// A genuine tensor has at least one non-singleton dimension beyond the first two.
return std::any_of(_base.begin()+2, _base.end(), greater_one);
}
// Raw read-only access to the underlying extent array.
const_pointer data() const
{
return this->_base.data();
}
// Unchecked element access (const).
const_reference operator[] (size_type p) const
{
return this->_base[p];
}
// Bounds-checked element access (const); throws std::out_of_range.
const_reference at (size_type p) const
{
return this->_base.at(p);
}
// Unchecked element access.
reference operator[] (size_type p)
{
return this->_base[p];
}
// Bounds-checked element access; throws std::out_of_range.
reference at (size_type p)
{
return this->_base.at(p);
}
// Returns true if no extents are stored.
bool empty() const
{
return this->_base.empty();
}
// Returns the number of dimensions (the rank of the shape tuple).
size_type size() const
{
return this->_base.size();
}
/** @brief Returns true if size > 1 and all elements > 0 */
bool valid() const
{
return
this->size() > 1 &&
std::none_of(_base.begin(), _base.end(),
[](const_reference a){ return a == value_type(0); });
}
/** @brief Returns the number of elements a tensor holds with this */
size_type product() const
{
if(_base.empty()){
return 0;
}
// NOTE(review): the initial value 1ul fixes the accumulator type to
// unsigned long regardless of value_type; result is narrowed to size_type
// on return — confirm this is intended for narrow int_type instantiations.
return std::accumulate(_base.begin(), _base.end(), 1ul, std::multiplies<>());
}
/** @brief Eliminates singleton dimensions when size > 2
 *
 * squeeze { 1,1} -> { 1,1}
 * squeeze { 2,1} -> { 2,1}
 * squeeze { 1,2} -> { 1,2}
 *
 * squeeze {1,2,3} -> { 2,3}
 * squeeze {2,1,3} -> { 2,3}
 * squeeze {1,3,1} -> { 3,1}
 *
 */
basic_extents squeeze() const
{
if(this->size() <= 2){
return *this;
}
auto new_extent = basic_extents{};
auto insert_iter = std::back_insert_iterator<typename basic_extents::base_type>(new_extent._base);
// NOTE(review): std::remove_copy drops EVERY element equal to 1, so
// squeeze {1,3,1} yields {3}, not {3,1} as the example above suggests —
// confirm which behavior is intended. The result also bypasses valid().
std::remove_copy(this->_base.begin(), this->_base.end(), insert_iter ,value_type{1});
return new_extent;
}
// Removes all extents; the object becomes empty() (and thus not valid()).
void clear()
{
this->_base.clear();
}
// Element-wise equality of the shape tuples.
bool operator == (basic_extents const& b) const
{
return _base == b._base;
}
bool operator != (basic_extents const& b) const
{
return !( _base == b._base );
}
const_iterator
begin() const
{
return _base.begin();
}
const_iterator
end() const
{
return _base.end();
}
// Read-only access to the underlying std::vector.
base_type const& base() const { return _base; }
private:
// Shape tuple; invariants (size > 1, no zero extent) are established by the
// validating constructors but can be bypassed by squeeze() and clear().
base_type _base;
};
using shape = basic_extents<std::size_t>;
} // namespace ublas
} // namespace numeric
} // namespace boost
#endif

View File

@@ -0,0 +1,558 @@
//
// Copyright (c) 2018-2019, Cem Bassoy, cem.bassoy@gmail.com
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// The authors gratefully acknowledge the support of
// Fraunhofer IOSB, Ettlingen, Germany
//
#ifndef BOOST_UBLAS_TENSOR_FUNCTIONS_HPP
#define BOOST_UBLAS_TENSOR_FUNCTIONS_HPP
#include <algorithm>
#include <cmath>
#include <complex>
#include <numeric>
#include <stdexcept>
#include <vector>
#include "multiplication.hpp"
#include "algorithms.hpp"
#include "expression.hpp"
#include "expression_evaluation.hpp"
#include "storage_traits.hpp"
namespace boost {
namespace numeric {
namespace ublas {
template<class Value, class Format, class Allocator>
class tensor;
template<class Value, class Format, class Allocator>
class matrix;
template<class Value, class Allocator>
class vector;
/** @brief Computes the m-mode tensor-times-vector product
 *
 * Implements C[i1,...,im-1,im+1,...,ip] = A[i1,i2,...,ip] * b[im]
 *
 * @note calls ublas::ttv
 *
 * @param[in] a tensor object A with order p
 * @param[in] b vector object B
 * @param[in] m contraction dimension with 1 <= m <= p
 *
 * @returns tensor object C with order p-1, the same storage format and allocator type as A
 */
template<class V, class F, class A1, class A2>
auto prod(tensor<V,F,A1> const& a, vector<V,A2> const& b, const std::size_t m)
{
    using tensor_type  = tensor<V,F,A1>;
    using extents_type = typename tensor_type::extents_type;
    using ebase_type   = typename extents_type::base_type;
    using value_type   = typename tensor_type::value_type;
    using size_type    = typename extents_type::value_type;

    auto const p = std::size_t(a.rank());

    // Validate mode and operands before any allocation takes place.
    if( m == 0)
        throw std::length_error("error in boost::numeric::ublas::prod(ttv): contraction mode must be greater than zero.");
    if( p < m )
        throw std::length_error("error in boost::numeric::ublas::prod(ttv): rank of tensor must be greater than or equal to the modus.");
    if( p == 0)
        throw std::length_error("error in boost::numeric::ublas::prod(ttv): rank of tensor must be greater than zero.");
    if( a.empty() )
        throw std::length_error("error in boost::numeric::ublas::prod(ttv): first argument tensor should not be empty.");
    if( b.size() == 0)
        throw std::length_error("error in boost::numeric::ublas::prod(ttv): second argument vector should not be empty.");

    // Extents of C: every extent of A except mode m. Padded with singleton
    // dimensions so the result has at least rank 2.
    auto nc = ebase_type(std::max(p-1, size_type(2)) , size_type(1));
    // The vector b is modelled as a (n,1)-shaped operand.
    auto nb = ebase_type{b.size(),1};

    for(auto i = 0u, j = 0u; i < p; ++i)
        if(i != m-1)
            nc[j++] = a.extents().at(i);

    auto c  = tensor_type(extents_type(nc),value_type{});
    auto bb = &(b(0));

    // NOTE(review): nb is passed both as the extents and as the strides of b;
    // this presumes ttv does not use the vector's strides - confirm against
    // the ttv contract in multiplication.hpp.
    ttv(m, p,
        c.data(), c.extents().data(), c.strides().data(),
        a.data(), a.extents().data(), a.strides().data(),
        bb, nb.data(), nb.data());

    return c;
}
/** @brief Computes the m-mode tensor-times-matrix product
 *
 * Implements C[i1,...,im-1,j,im+1,...,ip] = A[i1,i2,...,ip] * B[j,im]
 *
 * @note calls ublas::ttm
 *
 * @param[in] a tensor object A with order p
 * @param[in] b matrix object B
 * @param[in] m contraction dimension with 1 <= m <= p
 *
 * @returns tensor object C with order p, the same storage format and allocator type as A
 */
template<class V, class F, class A1, class A2>
auto prod(tensor<V,F,A1> const& a, matrix<V,F,A2> const& b, const std::size_t m)
{
    using tensor_type  = tensor<V,F,A1>;
    using extents_type = typename tensor_type::extents_type;
    using strides_type = typename tensor_type::strides_type;
    using value_type   = typename tensor_type::value_type;

    auto const p = a.rank();

    if( m == 0)
        throw std::length_error("error in boost::numeric::ublas::prod(ttm): contraction mode must be greater than zero.");
    if( p < m || m > a.extents().size())
        throw std::length_error("error in boost::numeric::ublas::prod(ttm): rank of the tensor must be greater equal the modus.");
    if( p == 0)
        throw std::length_error("error in boost::numeric::ublas::prod(ttm): rank of the tensor must be greater than zero.");
    if( a.empty() )
        throw std::length_error("error in boost::numeric::ublas::prod(ttm): first argument tensor should not be empty.");
    if( b.size1()*b.size2() == 0)
        throw std::length_error("error in boost::numeric::ublas::prod(ttm): second argument matrix should not be empty.");

    // C keeps the extents of A except that mode m takes the row count of B.
    auto nc = a.extents().base();
    auto nb = extents_type {b.size1(),b.size2()};
    auto wb = strides_type (nb);
    nc[m-1] = nb[0];

    auto c  = tensor_type(extents_type(nc),value_type{});
    auto bb = &(b(0,0));

    ttm(m, p,
        c.data(), c.extents().data(), c.strides().data(),
        a.data(), a.extents().data(), a.strides().data(),
        bb, nb.data(), wb.data());

    return c;
}
/** @brief Computes the q-mode tensor-times-tensor product
 *
 * Implements C[i1,...,ir,j1,...,js] = sum( A[i1,...,ir+q] * B[j1,...,js+q] )
 *
 * @note calls ublas::ttt
 *
 * na[phia[x]] = nb[phib[x]] for 1 <= x <= q
 *
 * @param[in] phia one-based permutation tuple of length q for the first input tensor a
 * @param[in] phib one-based permutation tuple of length q for the second input tensor b
 * @param[in] a left-hand side tensor with order r+q
 * @param[in] b right-hand side tensor with order s+q
 * @result tensor with order r+s
 */
template<class V, class F, class A1, class A2>
auto prod(tensor<V,F,A1> const& a, tensor<V,F,A2> const& b,
          std::vector<std::size_t> const& phia, std::vector<std::size_t> const& phib)
{
    using tensor_type  = tensor<V,F,A1>;
    using extents_type = typename tensor_type::extents_type;
    using value_type   = typename tensor_type::value_type;
    using size_type    = typename extents_type::value_type;

    auto const pa = a.rank();
    auto const pb = b.rank();
    auto const q  = size_type(phia.size());

    if(pa == 0ul)
        throw std::runtime_error("error in ublas::prod: order of left-hand side tensor must be greater than 0.");
    if(pb == 0ul)
        throw std::runtime_error("error in ublas::prod: order of right-hand side tensor must be greater than 0.");
    if(pa < q)
        throw std::runtime_error("error in ublas::prod: number of contraction dimensions cannot be greater than the order of the left-hand side tensor.");
    if(pb < q)
        throw std::runtime_error("error in ublas::prod: number of contraction dimensions cannot be greater than the order of the right-hand side tensor.");
    if(q != phib.size())
        throw std::runtime_error("error in ublas::prod: permutation tuples must have the same length.");
    if(pa < phia.size())
        throw std::runtime_error("error in ublas::prod: permutation tuple for the left-hand side tensor cannot be greater than the corresponding order.");
    if(pb < phib.size())
        throw std::runtime_error("error in ublas::prod: permutation tuple for the right-hand side tensor cannot be greater than the corresponding order.");

    auto const& na = a.extents();
    auto const& nb = b.extents();

    // Contracted extents must agree pairwise between A and B.
    for(auto i = 0ul; i < q; ++i)
        if( na.at(phia.at(i)-1) != nb.at(phib.at(i)-1))
            throw std::runtime_error("error in ublas::prod: permutations of the extents are not correct.");

    // r and s are the numbers of free (non-contracted) modes of A and B.
    auto const r = pa - q;
    auto const s = pb - q;

    // phia1/phib1 start as the identity permutation 1..p.
    std::vector<std::size_t> phia1(pa), phib1(pb);
    std::iota(phia1.begin(), phia1.end(), 1ul);
    std::iota(phib1.begin(), phib1.end(), 1ul);

    // Extents of C: free modes of A followed by free modes of B, padded so
    // that C has at least rank 2.
    std::vector<std::size_t> nc( std::max ( r+s , size_type(2) ), size_type(1) );

    // Move each contracted mode of A to the back of phia1: std::remove shifts
    // all other entries to the front and returns the slot where the removed
    // value is re-written. Afterwards phia1 = [free modes..., contracted modes...].
    for(auto i = 0ul; i < phia.size(); ++i)
        * std::remove(phia1.begin(), phia1.end(), phia.at(i)) = phia.at(i);
    //phia1.erase( std::remove(phia1.begin(), phia1.end(), phia.at(i)), phia1.end() ) ;

    assert(phia1.size() == pa);

    // The first r extents of C are the free extents of A.
    for(auto i = 0ul; i < r; ++i)
        nc[ i ] = na[ phia1[ i ] - 1 ];

    // Same reordering for B: free modes first, contracted modes last.
    for(auto i = 0ul; i < phib.size(); ++i)
        * std::remove(phib1.begin(), phib1.end(), phib.at(i)) = phib.at(i) ;
    //phib1.erase( std::remove(phib1.begin(), phib1.end(), phia.at(i)), phib1.end() ) ;

    assert(phib1.size() == pb);

    // The remaining s extents of C are the free extents of B.
    for(auto i = 0ul; i < s; ++i)
        nc[ r + i ] = nb[ phib1[ i ] - 1 ];

    // std::copy( phib.begin(), phib.end(), phib1.end()  );

    assert(  phia1.size() == pa  );
    assert(  phib1.size() == pb  );

    auto c = tensor_type(extents_type(nc), value_type{});

    ttt(pa, pb, q,
        phia1.data(), phib1.data(),
        c.data(), c.extents().data(), c.strides().data(),
        a.data(), a.extents().data(), a.strides().data(),
        b.data(), b.extents().data(), b.strides().data());

    return c;
}
//template<class V, class F, class A1, class A2, std::size_t N, std::size_t M>
//auto operator*( tensor_index<V,F,A1,N> const& lhs, tensor_index<V,F,A2,M> const& rhs)
/** @brief Computes the q-mode tensor-times-tensor product
 *
 * Implements C[i1,...,ir,j1,...,js] = sum( A[i1,...,ir+q] * B[j1,...,js+q] )
 *
 * @note calls ublas::ttt via the two-tuple overload with phia = phib = phi
 *
 * na[phi[x]] = nb[phi[x]] for 1 <= x <= q
 *
 * @param[in] phi one-based permutation tuple of length q for both input tensors
 * @param[in] a left-hand side tensor with order r+q
 * @param[in] b right-hand side tensor with order s+q
 * @result tensor with order r+s
 */
template<class V, class F, class A1, class A2>
auto prod(tensor<V,F,A1> const& a, tensor<V,F,A2> const& b,
          std::vector<std::size_t> const& phi)
{
    // Contract the same modes on both operands.
    return prod(a, b, phi, phi);
}
/** @brief Computes the inner product of two tensors of identical shape
 *
 * Implements c = sum(A[i1,i2,...,ip] * B[i1,i2,...,ip])
 *
 * @note delegates to the free function inner
 *
 * @param[in] a tensor object A
 * @param[in] b tensor object B
 *
 * @returns a scalar of the tensor's value type
 */
template<class V, class F, class A1, class A2>
auto inner_prod(tensor<V,F,A1> const& a, tensor<V,F,A2> const& b)
{
    using value_type = typename tensor<V,F,A1>::value_type;

    // Both operands must be non-empty and of identical shape.
    if( a.rank() != b.rank() )
        throw std::length_error("error in boost::numeric::ublas::inner_prod: Rank of both tensors must be the same.");

    if( a.empty() || b.empty())
        throw std::length_error("error in boost::numeric::ublas::inner_prod: Tensors should not be empty.");

    if( a.extents() != b.extents())
        throw std::length_error("error in boost::numeric::ublas::inner_prod: Tensor extents should be the same.");

    return inner(a.rank(), a.extents().data(),
                 a.data(), a.strides().data(),
                 b.data(), b.strides().data(), value_type{0});
}
/** @brief Computes the outer product of two tensors
 *
 * Implements C[i1,...,ip,j1,...,jq] = A[i1,i2,...,ip] * B[j1,j2,...,jq]
 *
 * @note delegates to the free function outer
 *
 * @param[in] a tensor object A
 * @param[in] b tensor object B
 *
 * @returns tensor object C with the same storage format F and allocator type A1
 */
template<class V, class F, class A1, class A2>
auto outer_prod(tensor<V,F,A1> const& a, tensor<V,F,A2> const& b)
{
    using tensor_type  = tensor<V,F,A1>;
    using extents_type = typename tensor_type::extents_type;

    if( a.empty() || b.empty() )
        throw std::runtime_error("error in boost::numeric::ublas::outer_prod: tensors should not be empty.");

    // The extents of C are the extents of A followed by the extents of B.
    auto nc  = typename extents_type::base_type(a.rank() + b.rank());
    auto mid = std::copy(a.extents().begin(), a.extents().end(), nc.begin());
    std::copy(b.extents().begin(), b.extents().end(), mid);

    auto c = tensor_type(extents_type(nc));

    outer(c.data(), c.rank(), c.extents().data(), c.strides().data(),
          a.data(), a.rank(), a.extents().data(), a.strides().data(),
          b.data(), b.rank(), b.extents().data(), b.strides().data());

    return c;
}
/** @brief Transposes a tensor according to a permutation tuple
 *
 * Implements C[tau[i1],tau[i2]...,tau[ip]] = A[i1,i2,...,ip]
 *
 * @note calls the free function trans
 *
 * @param[in] a tensor object of rank p
 * @param[in] tau one-based permutation tuple of length p
 * @returns a transposed tensor object with the same storage format F and allocator type A
 */
template<class V, class F, class A>
auto trans(tensor<V,F,A> const& a, std::vector<std::size_t> const& tau)
{
    using tensor_type  = tensor<V,F,A>;
    using extents_type = typename tensor_type::extents_type;
    // using strides_type = typename tensor_type::strides_type;

    // An empty tensor transposes to an empty tensor.
    if( a.empty() )
        return tensor<V,F,A>{};

    auto const p = a.rank();
    auto const& na = a.extents();

    // Permute the extents: dimension i of A becomes dimension tau[i] of C.
    auto nc = typename extents_type::base_type (p);
    for(auto i = 0u; i < p; ++i)
        nc.at(tau.at(i)-1) = na.at(i);

    // auto wc = strides_type(extents_type(nc));
    auto c = tensor_type(extents_type(nc));

    trans( a.rank(), a.extents().data(), tau.data(),
           c.data(), c.strides().data(),
           a.data(), a.strides().data());

    // Alternative implementation kept for reference:
    // auto wc_pi = typename strides_type::base_type (p);
    // for(auto i = 0u; i < p; ++i)
    //     wc_pi.at(tau.at(i)-1) = c.strides().at(i);

    //copy(a.rank(),
    //     a.extents().data(),
    //     c.data(), wc_pi.data(),
    //     a.data(), a.strides().data() );

    return c;
}
/** @brief Computes the Frobenius norm of a tensor expression
 *
 * Implements the two-norm with
 * k = sqrt( sum_(i1,...,ip) A(i1,...,ip)^2 )
 *
 * @note evaluates the tensor expression into a temporary tensor and calls accumulate
 *
 * @param[in] expr tensor expression of rank p
 * @returns the Frobenius norm of the evaluated tensor
 */
//template<class V, class F, class A>
//auto norm(tensor<V,F,A> const& a)
template<class T, class D>
auto norm(detail::tensor_expression<T,D> const& expr)
{
    using tensor_type = typename detail::tensor_expression<T,D>::tensor_type;
    using value_type = typename tensor_type::value_type;

    // Evaluate the (possibly lazy) expression eagerly into a tensor.
    auto a = tensor_type( expr );

    if( a.empty() )
        throw std::runtime_error("error in boost::numeric::ublas::norm: tensors should not be empty.");

    // Sum of squares over all elements, then the square root.
    return std::sqrt( accumulate( a.order(), a.extents().data(), a.data(), a.strides().data(), value_type{},
                                  [](auto const& l, auto const& r){ return l + r*r; } ) ) ;
}
/** @brief Builds a lazy expression that extracts the real component of each element
 *
 * @param[in] expr tensor expression
 * @returns unary tensor expression
 */
template<class T, class D>
auto real(detail::tensor_expression<T,D> const& expr) {
    auto take_real = [](auto const& element) { return std::real(element); };
    return detail::make_unary_tensor_expression<T>(expr(), take_real);
}
/** @brief Extracts the real component of a complex-valued tensor expression
 *
 * @param[in] expr tensor expression over std::complex<V> elements
 * @returns an eagerly evaluated tensor holding the real value type V
 */
template<class V, class F, class A, class D>
auto real(detail::tensor_expression<tensor<std::complex<V>,F,A>,D> const& expr)
{
    using tensor_complex_type = tensor<std::complex<V>,F,A>;
    // Result tensor uses the underlying real value type with a rebound storage type.
    using tensor_type = tensor<V,F,typename storage_traits<A>::template rebind<V>>;

    if( detail::retrieve_extents( expr ).empty() )
        throw std::runtime_error("error in boost::numeric::ublas::real: tensors should not be empty.");

    auto a = tensor_complex_type( expr );
    auto c = tensor_type( a.extents() );

    std::transform( a.begin(), a.end(),  c.begin(), [](auto const& l){ return std::real(l) ; }  );

    return c;
}
/** @brief Builds a lazy expression that extracts the imaginary component of each element
 *
 * @param[in] lhs tensor expression
 * @returns unary tensor expression
 */
template<class T, class D>
auto imag(detail::tensor_expression<T,D> const& lhs) {
    auto take_imag = [](auto const& element) { return std::imag(element); };
    return detail::make_unary_tensor_expression<T>(lhs(), take_imag);
}
/** @brief Extracts the imaginary component of a complex-valued tensor expression
 *
 * @param[in] expr tensor expression over std::complex<V> elements
 * @returns an eagerly evaluated tensor holding the real value type V
 * @throws std::runtime_error if the evaluated expression would be empty
 */
template<class V, class A, class F, class D>
auto imag(detail::tensor_expression<tensor<std::complex<V>,F,A>,D> const& expr)
{
    using tensor_complex_type = tensor<std::complex<V>,F,A>;
    // Result tensor uses the underlying real value type with a rebound storage type.
    using tensor_type = tensor<V,F,typename storage_traits<A>::template rebind<V>>;

    if( detail::retrieve_extents( expr ).empty() )
        // BUGFIX: the message previously referred to boost::numeric::ublas::real.
        throw std::runtime_error("error in boost::numeric::ublas::imag: tensors should not be empty.");

    auto a = tensor_complex_type( expr );
    auto c = tensor_type( a.extents() );

    std::transform( a.begin(), a.end(),  c.begin(), [](auto const& l){ return std::imag(l) ; }  );

    return c;
}
/** @brief Computes the complex conjugate of each element of a tensor expression
 *
 * @param[in] expr tensor expression over real-valued elements
 * @returns an eagerly evaluated tensor with std::complex value type
 */
template<class T, class D>
auto conj(detail::tensor_expression<T,D> const& expr)
{
    using tensor_type = T;
    using value_type = typename tensor_type::value_type;
    using layout_type = typename tensor_type::layout_type;
    using array_type = typename tensor_type::array_type;

    // The result stores std::complex<value_type> in a rebound storage array.
    using new_value_type = std::complex<value_type>;
    using new_array_type = typename storage_traits<array_type>::template rebind<new_value_type>;

    using tensor_complex_type = tensor<new_value_type,layout_type, new_array_type>;

    if( detail::retrieve_extents( expr ).empty() )
        throw std::runtime_error("error in boost::numeric::ublas::conj: tensors should not be empty.");

    auto a = tensor_type( expr );
    auto c = tensor_complex_type( a.extents() );

    std::transform( a.begin(), a.end(),  c.begin(), [](auto const& l){ return std::conj(l) ; }  );

    return c;
}
/** @brief Builds a lazy expression that conjugates each element of a complex tensor expression
 *
 * @param[in] expr tensor expression over std::complex elements
 * @returns unary tensor expression
 */
template<class V, class A, class F, class D>
auto conj(detail::tensor_expression<tensor<std::complex<V>,F,A>,D> const& expr)
{
    auto take_conj = [](auto const& element) { return std::conj(element); };
    return detail::make_unary_tensor_expression<tensor<std::complex<V>,F,A>>(expr(), take_conj);
}
}
}
}
#endif

View File

@@ -0,0 +1,89 @@
//
// Copyright (c) 2018-2019, Cem Bassoy, cem.bassoy@gmail.com
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// The authors gratefully acknowledge the support of
// Fraunhofer IOSB, Ettlingen, Germany
//
#ifndef BOOST_UBLAS_TENSOR_INDEX_HPP
#define BOOST_UBLAS_TENSOR_INDEX_HPP
#include <cstddef>
#include <array>
#include <vector>
namespace boost {
namespace numeric {
namespace ublas {
namespace index {
/** @brief Proxy template class for the Einstein summation notation
 *
 * @note index::index_type<K> for 0<=K<=16 is used in tensor::operator()
 *
 * @tparam I wrapped integer identifying the index
 */
template<std::size_t I>
struct index_type
{
    // The wrapped compile-time index value.
    static constexpr std::size_t value = I;

    // Runtime comparison against a plain integer value.
    constexpr bool operator == (std::size_t other) const { return value == other; }
    constexpr bool operator != (std::size_t other) const { return value != other; }

    // Comparison against another index_type compares the wrapped values.
    template <std::size_t K>
    constexpr bool operator == (index_type<K> /*other*/) const {  return I==K; }
    template <std::size_t K>
    constexpr bool operator != (index_type<K> /*other*/) const {  return I!=K; }

    // Same-type comparisons are decided at compile time.
    constexpr bool operator == (index_type /*other*/) const {  return true;  }
    constexpr bool operator != (index_type /*other*/) const {  return false; }

    // Returns the wrapped value.
    constexpr std::size_t operator()() const { return I; }
};
/** @brief Predefined index objects for the Einstein summation notation
 *
 * @note index::_a ... index::_z are used in tensor::operator().
 * index::_ (wrapped value 0) acts as the placeholder index: the multi-index
 * utilities treat value 0 as "not to be matched/contracted".
 */
static constexpr index_type< 0> _;
static constexpr index_type< 1> _a;
static constexpr index_type< 2> _b;
static constexpr index_type< 3> _c;
static constexpr index_type< 4> _d;
static constexpr index_type< 5> _e;
static constexpr index_type< 6> _f;
static constexpr index_type< 7> _g;
static constexpr index_type< 8> _h;
static constexpr index_type< 9> _i;
static constexpr index_type<10> _j;
static constexpr index_type<11> _k;
static constexpr index_type<12> _l;
static constexpr index_type<13> _m;
static constexpr index_type<14> _n;
static constexpr index_type<15> _o;
static constexpr index_type<16> _p;
static constexpr index_type<17> _q;
static constexpr index_type<18> _r;
static constexpr index_type<19> _s;
static constexpr index_type<20> _t;
static constexpr index_type<21> _u;
static constexpr index_type<22> _v;
static constexpr index_type<23> _w;
static constexpr index_type<24> _x;
static constexpr index_type<25> _y;
static constexpr index_type<26> _z;
} // namespace index
}
}
}
#endif // BOOST_UBLAS_TENSOR_INDEX_HPP

View File

@@ -0,0 +1,110 @@
//
// Copyright (c) 2018-2019, Cem Bassoy, cem.bassoy@gmail.com
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// The authors gratefully acknowledge the support of
// Fraunhofer IOSB, Ettlingen, Germany
//
#ifndef BOOST_UBLAS_TENSOR_MULTI_INDEX_HPP
#define BOOST_UBLAS_TENSOR_MULTI_INDEX_HPP
#include <cstddef>
#include <array>
#include <vector>
#include "multi_index_utility.hpp"
namespace boost {
namespace numeric {
namespace ublas {
namespace index {
template<std::size_t I>
struct index_type;
} // namespace index
}
}
}
namespace boost {
namespace numeric {
namespace ublas {
/** @brief Proxy class for the Einstein summation notation
 *
 * Holds an array of N index values built from index::index_type objects
 * (index::_a ... index::_z) as used in tensor::operator().
 */
template<std::size_t N>
class multi_index
{
public:
    multi_index() = delete;

    /** @brief Constructs the multi-index from N index::index_type arguments.
     *
     * Fails to compile if the number of arguments differs from N or if a
     * non-placeholder index occurs more than once.
     */
    template<std::size_t I, class ... indexes>
    constexpr multi_index(index::index_type<I> const& i, indexes ... is )
        : _base{i(), is()... }
    {
        static_assert( sizeof...(is)+1 == N,
                       "Static assert in boost::numeric::ublas::multi_index: number of constructor arguments is not equal to the template parameter." );

        static_assert( valid_multi_index<std::tuple<index::index_type<I>, indexes ...> >::value,
                       "Static assert in boost::numeric::ublas::multi_index: indexes occur twice in multi-index." );
    }

    // Rule of Zero: the defaulted copy operations are exactly the memberwise
    // copy of the std::array member; declaring them '= default' keeps the
    // same semantics while restoring triviality.
    multi_index(multi_index const& other) = default;
    multi_index& operator=(multi_index const& other) = default;

    ~multi_index() = default;

    /// Read-only access to the underlying index array.
    auto const& base() const { return _base; }
    /// Number of indexes (always N).
    constexpr auto size() const { return _base.size(); }
    /// Bounds-checked access to the i-th index value.
    constexpr auto at(std::size_t i) const { return _base.at(i); }
    constexpr auto operator[](std::size_t i) const { return _base.at(i); }

private:
    std::array<std::size_t, N> _base;
};
/** @brief Compile-time access to the K-th entry of a multi_index. */
template<std::size_t K, std::size_t N>
constexpr auto get(multi_index<N> const& m) { return std::get<K>(m.base()); }
/** @brief Translates matching entries of two multi-indices into one-based position vectors
 *
 * Compares every entry of lhs with every entry of rhs; for each pair of equal,
 * non-placeholder indices the one-based positions are recorded.
 *
 * @param lhs multi-index of length M
 * @param rhs multi-index of length N
 * @returns a pair of vectors holding the one-based positions of matching
 *          indices in lhs (first) and rhs (second)
 */
template<std::size_t M, std::size_t N>
auto array_to_vector(multi_index<M> const& lhs, multi_index<N> const& rhs)
{
    using vtype = std::vector<std::size_t>;

    auto pair_of_vector = std::make_pair( vtype {}, vtype{}  );

    // BUGFIX: iterate lhs over its own length M and rhs over N. The previous
    // bounds were swapped, which indexed out of range (std::array::at throws)
    // whenever M != N.
    for(auto i = 0u; i < M; ++i)
        for(auto j = 0u; j < N; ++j)
            if ( lhs.at(i) == rhs.at(j) && lhs.at(i) != boost::numeric::ublas::index::_() )
            {
                pair_of_vector.first .push_back( i+1 );
                pair_of_vector.second.push_back( j+1 );
            }

    return pair_of_vector;
}
} // namespace ublas
} // namespace numeric
} // namespace boost
#endif // BOOST_UBLAS_TENSOR_MULTI_INDEX_HPP

View File

@@ -0,0 +1,364 @@
//
// Copyright (c) 2018-2019, Cem Bassoy, cem.bassoy@gmail.com
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// The authors gratefully acknowledge the support of
// Fraunhofer IOSB, Ettlingen, Germany
//
#ifndef BOOST_UBLAS_TENSOR_MULTI_INDEX_UTILITY_HPP
#define BOOST_UBLAS_TENSOR_MULTI_INDEX_UTILITY_HPP
#include <tuple>
#include <type_traits>
namespace boost {
namespace numeric {
namespace ublas {
namespace detail {
// Recursive implementation of has_index: checks whether a single index type
// occurs in a std::tuple of index types by comparing wrapped values.
template<class ... index_types>
struct has_index_impl;

// Base case: two single index types match if their wrapped values are equal.
template<class itype_left, class itype_right>
struct has_index_impl<itype_left, itype_right>
{
    static constexpr bool value = itype_left::value == itype_right::value;
};

// An empty tuple contains nothing.
template<class itype_left>
struct has_index_impl <itype_left, std::tuple<> >
{
    static constexpr bool value = false;
};

// Single-element tuple: compare against that element.
template<class itype_left, class itype_right>
struct has_index_impl <itype_left, std::tuple<itype_right> >
{
    static constexpr bool value = has_index_impl<itype_left,itype_right>::value;
};

// Recursive case: match the head or recurse into the tail of the tuple.
template<class itype_left, class itype_right, class ... index_types>
struct has_index_impl <itype_left, std::tuple<itype_right, index_types...> >
{
    using next_type = has_index_impl<itype_left, std::tuple<index_types...>>;
    static constexpr bool value = has_index_impl<itype_left,itype_right>::value || next_type::value;
};
} // namespace detail
/** @brief has_index is true if index occurs once or more in a multi-index
 *
 * @note a multi-index is represented as a tuple of single indexes of type boost::numeric::ublas::index::index_type
 *
 * @code auto has_index_value = has_index<index_type<1>, std::tuple<index_type<2>,index_type<1>> >::value; @endcode
 *
 * @tparam index_type type of index
 * @tparam tuple_type type of std::tuple representing a multi-index
 */
template<class index_type, class tuple_type>
struct has_index
{
    // Decay both types so references/cv-qualifiers do not affect the match.
    static constexpr bool value = detail::has_index_impl<std::decay_t<index_type>,std::decay_t<tuple_type>>::value;
};
} // namespace ublas
} // namespace numeric
} // namespace boost
////////////////////////////////////////////////
////////////////////////////////////////////////
namespace boost {
namespace numeric {
namespace ublas {
namespace detail {
// Recursive implementation of valid_multi_index: a tuple of indexes is valid
// when no non-placeholder index (value != 0) occurs more than once.
template<class ... index_types>
struct valid_multi_index_impl;

// An empty multi-index is trivially valid.
template<>
struct valid_multi_index_impl<std::tuple<>>
{
    static constexpr bool value = true;
};

// A single index can never collide.
template<class itype>
struct valid_multi_index_impl<std::tuple<itype>>
{
    static constexpr bool value = true;
};

// Recursive case: the head must not re-occur in the tail (placeholder
// indexes with value 0 are exempt), and the tail must itself be valid.
template<class itype, class ... index_types>
struct valid_multi_index_impl<std::tuple<itype,index_types...>>
{
    using ttype          = std::tuple<index_types...>;
    using has_index_type = has_index<itype, ttype>;

    static constexpr bool is_index_zero   = itype::value==0ul;
    static constexpr bool has_index_value = has_index_type::value && !is_index_zero;
    static constexpr bool value = !has_index_value && valid_multi_index_impl<ttype>::value;
};
} // namespace detail
/** @brief valid_multi_index is true if indexes occur only once in a multi-index
 *
 * @note a multi-index is represented as a tuple of single indexes of type boost::numeric::ublas::index::index_type
 *
 * @code auto valid = valid_multi_index< std::tuple<index_type<2>,index_type<1>> >::value;
 * @endcode
 *
 * @tparam tuple_type type of std::tuple representing a multi-index
 */
template<class tuple_type>  // BUGFIX: template parameter was misspelled 'tupe_type'
struct valid_multi_index
{
    static constexpr bool value = detail::valid_multi_index_impl<std::decay_t<tuple_type>>::value;
};
} // namespace ublas
} // namespace numeric
} // namespace boost
////////////////////////////////////////////////
////////////////////////////////////////////////
namespace boost {
namespace numeric {
namespace ublas {
namespace detail {
// Recursive implementation of number_equal_indexes: counts how many entries
// of the left tuple also occur in the right tuple (placeholders excluded).
template<class ... index_types >
struct number_equal_indexes_impl;

// Base case: an empty left tuple contributes no matches.
template<class ... itypes_right >
struct number_equal_indexes_impl < std::tuple<>, std::tuple<itypes_right...>>
{
    static constexpr unsigned value  = 0;
};

// Recursive case: count 1 if the head occurs in the right tuple (and is not
// the placeholder index 0), then recurse over the remaining left entries.
template<class itype, class ... itypes_left, class ... itypes_right>
struct number_equal_indexes_impl < std::tuple<itype,itypes_left...>, std::tuple<itypes_right...>>
{
    using tuple_right     = std::tuple<itypes_right...>;
    using has_index_type  = has_index<itype, tuple_right>;

    static constexpr bool is_index_zero   = itype::value==0ul;
    static constexpr bool has_index_value = has_index_type::value && !is_index_zero;

    using next_type = number_equal_indexes_impl< std::tuple<itypes_left...>, tuple_right >;
    static constexpr unsigned v = has_index_value ? 1 : 0;
    static constexpr unsigned value  = v + next_type::value;
};
} // namespace detail
/** @brief number_equal_indexes contains the number of equal indexes of two multi-indexes
 *
 * @note a multi-index is represented as a tuple of single indexes of type boost::numeric::ublas::index::index_type
 *
 *
 * @code auto num = number_equal_indexes<
 *                       std::tuple<index_type<2>,index_type<1>>,
 *                       std::tuple<index_type<1>,index_type<3>> >::value;
 * @endcode
 *
 * @tparam tuple_type_left  type of left std::tuple representing a multi-index
 * @tparam tuple_type_right type of right std::tuple representing a multi-index
 */
template<class tuple_left, class tuple_right>
struct number_equal_indexes
{
    // Decay both tuples so references/cv-qualifiers do not affect counting.
    static constexpr unsigned value  =
        detail::number_equal_indexes_impl< std::decay_t<tuple_left>, std::decay_t<tuple_right>>::value;
};
} // namespace ublas
} // namespace numeric
} // namespace boost
////////////////////////////////////////////////
////////////////////////////////////////////////
namespace boost {
namespace numeric {
namespace ublas {
namespace detail {
// Recursive implementation of index_position: scans tuple positions r..m-1
// for the first element whose type equals itype.
template<std::size_t r, std::size_t m, class itype, class ttype>
struct index_position_impl
{
    static constexpr auto is_same = std::is_same< std::decay_t<itype>, std::decay_t<std::tuple_element_t<r,ttype>> >::value;
    // Stop at the first match; otherwise continue with the next position.
    static constexpr auto value   = is_same ? r : index_position_impl<r+1,m,itype,ttype>::value;
};

// End of recursion: not found, yield the tuple size as sentinel.
template<std::size_t m, class itype, class ttype>
struct index_position_impl < m, m, itype, ttype>
{
    static constexpr auto value = std::tuple_size<ttype>::value;
};
} // namespace detail
/** @brief index_position contains the zero-based index position of an index type within a multi-index
 *
 * @note a multi-index is represented as a tuple of single indexes of type boost::numeric::ublas::index::index_type
 *
 * @code auto num = index_position<
 *                       index_type<1>,
 *                       std::tuple<index_type<2>,index_type<1>> >::value;
 * @endcode
 *
 * @returns value returns 0 and N-1 if index_type is found, N otherwise where N is tuple_size_v<tuple_type>.
 *
 * @tparam index_type type of index
 * @tparam tuple_type type of std::tuple that is searched for index
 */
template<class index_type, class tuple_type>
struct index_position
{
    static constexpr auto value = detail::index_position_impl<0ul,std::tuple_size<tuple_type>::value,std::decay_t<index_type>,std::decay_t<tuple_type>>::value;
};
} // namespace ublas
} // namespace numeric
} // namespace boost
////////////////////////////////////////////////
////////////////////////////////////////////////
namespace boost {
namespace numeric {
namespace ublas {
namespace detail {
// Recursive implementation of index_position_pairs: visits left-tuple
// positions r..m (one-based recursion counter) and records, for each entry
// that also occurs in the right tuple, the zero-based position pair in out.
template<std::size_t r, std::size_t m>
struct index_position_pairs_impl
{
    template<class array_type, class tuple_left, class tuple_right>
    static constexpr void run(array_type& out, tuple_left const& lhs, tuple_right const& rhs, std::size_t p)
    {
        using index_type     = std::tuple_element_t<r-1,tuple_left>;
        using has_index_type = has_index<index_type, tuple_right>;
        using get_index_type = index_position<index_type,tuple_right>;
        using next_type      = index_position_pairs_impl<r+1,m>;
        // Record the pair only for matching, non-placeholder (value != 0) indexes.
        if constexpr ( has_index_type::value && index_type::value != 0)
            out[p++] = std::make_pair(r-1,get_index_type::value);
        next_type::run( out, lhs, rhs, p );
    }
};

// Last position: same check as above, but no further recursion.
template<std::size_t m>
struct index_position_pairs_impl<m,m>
{
    template<class array_type, class tuple_left, class tuple_right>
    static constexpr void run(array_type& out, tuple_left const& , tuple_right const& , std::size_t p)
    {
        using index_type     = std::tuple_element_t<m-1,tuple_left>;
        using has_index_type = has_index<index_type, tuple_right>;
        using get_index_type = index_position<index_type, tuple_right>;
        if constexpr ( has_index_type::value && index_type::value != 0 )
            out[p] = std::make_pair(m-1,get_index_type::value);
    }
};

// Degenerate case of an empty left tuple: nothing to do.
template<std::size_t r>
struct index_position_pairs_impl<r,0>
{
    template<class array_type, class tuple_left, class tuple_right>
    static constexpr void run(array_type&, tuple_left const& , tuple_right const& , std::size_t)
    {}
};
} // namespace detail
/** @brief index_position_pairs returns zero-based index positions of matching indexes of two multi-indexes
 *
 * @note a multi-index is represented as a tuple of single indexes of type boost::numeric::ublas::index::index_type
 *
 * @code auto pairs = index_position_pairs(std::make_tuple(_a,_b), std::make_tuple(_b,_c));
 * @endcode
 *
 * @returns a std::array instance containing index position pairs of type std::pair<std::size_t, std::size_t>.
 *
 * @param lhs left std::tuple instance representing a multi-index
 * @param rhs right std::tuple instance representing a multi-index
 */
template<class tuple_left, class tuple_right>
auto index_position_pairs(tuple_left const& lhs, tuple_right const& rhs)
{
    using pair_type = std::pair<std::size_t,std::size_t>;
    // m: length of lhs; p: number of matching (non-placeholder) indexes,
    // which fixes the array size at compile time.
    constexpr auto m = std::tuple_size<tuple_left >::value;
    constexpr auto p = number_equal_indexes<tuple_left, tuple_right>::value;
    auto array = std::array<pair_type,p>{};
    detail::index_position_pairs_impl<1,m>::run(array, lhs, rhs,0);
    return array;
}
} // namespace ublas
} // namespace numeric
} // namespace boost
////////////////////////////
////////////////////////////
////////////////////////////
////////////////////////////
namespace boost {
namespace numeric {
namespace ublas {
namespace detail {
// Expands the array of position pairs into two vectors, converting each
// zero-based position to one-based by adding 1.
template<class array_type, std::size_t ... R>
constexpr auto array_to_vector_impl( array_type const& array, std::index_sequence<R...> )
{
    return std::make_pair(
        std::vector<std::size_t>{std::get<0>( std::get<R>(array) )+1 ...} ,
        std::vector<std::size_t>{std::get<1>( std::get<R>(array) )+1 ...} );
}
} // namespace detail
/** @brief array_to_vector converts a std::array of zero-based index position pairs into two std::vector of one-based index positions
 *
 * @code auto two_vectors = array_to_vector(std::make_array ( std::make_pair(1,2), std::make_pair(3,4) ) ) ;
 * @endcode
 *
 * @returns two std::vector of one-based index positions
 *
 * @param array std::array of zero-based index position pairs
 */
template<class pair_type, std::size_t N>
constexpr auto array_to_vector( std::array<pair_type,N> const& array)
{
    // Generate indices 0..N-1 to unpack the array in one expansion.
    constexpr auto sequence = std::make_index_sequence<N>{};
    return detail::array_to_vector_impl( array, sequence );
}
} // namespace ublas
} // namespace numeric
} // namespace boost
#endif // BOOST_UBLAS_TENSOR_MULTI_INDEX_UTILITY_HPP

View File

@@ -0,0 +1,945 @@
//
// Copyright (c) 2018-2019, Cem Bassoy, cem.bassoy@gmail.com
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// The authors gratefully acknowledge the support of
// Fraunhofer IOSB, Ettlingen, Germany
//
#ifndef BOOST_UBLAS_TENSOR_MULTIPLICATION
#define BOOST_UBLAS_TENSOR_MULTIPLICATION
#include <cassert>
namespace boost {
namespace numeric {
namespace ublas {
namespace detail {
namespace recursive {
/** @brief Computes the tensor-times-tensor product for q contraction modes
*
* Implements C[i1,...,ir,j1,...,js] = sum( A[i1,...,ir+q] * B[j1,...,js+q] )
*
* nc[x] = na[phia[x] ] for 1 <= x <= r
* nc[r+x] = nb[phib[x] ] for 1 <= x <= s
* na[phia[r+x]] = nb[phib[s+x]] for 1 <= x <= q
*
* @note is used in function ttt
*
* @param k zero-based recursion level starting with 0
* @param r number of non-contraction indices of A
* @param s number of non-contraction indices of B
* @param q number of contraction indices with q > 0
* @param phia pointer to the permutation tuple of length q+r for A
* @param phib pointer to the permutation tuple of length q+s for B
* @param c pointer to the output tensor C with rank(A)=r+s
* @param nc pointer to the extents of tensor C
* @param wc pointer to the strides of tensor C
* @param a pointer to the first input tensor with rank(A)=r+q
* @param na pointer to the extents of the first input tensor A
* @param wa pointer to the strides of the first input tensor A
* @param b pointer to the second input tensor B with rank(B)=s+q
* @param nb pointer to the extents of the second input tensor B
* @param wb pointer to the strides of the second input tensor B
*/
template <class PointerOut, class PointerIn1, class PointerIn2, class SizeType>
void ttt(SizeType const k,
         SizeType const r, SizeType const s, SizeType const q,
         SizeType const*const phia, SizeType const*const phib,
         PointerOut c, SizeType const*const nc, SizeType const*const wc,
         PointerIn1 a, SizeType const*const na, SizeType const*const wa,
         PointerIn2 b, SizeType const*const nb, SizeType const*const wb)
{
    // phia/phib are one-based permutation tuples, hence "-1" when indexing na/nb/wa/wb.
    if(k < r)
    {
        // free modes of A: mode k of C corresponds to mode phia[k] of A
        assert(nc[k] == na[phia[k]-1]);
        for(size_t ic = 0u; ic < nc[k]; a += wa[phia[k]-1], c += wc[k], ++ic)
            ttt(k+1, r, s, q, phia,phib, c, nc, wc, a, na, wa, b, nb, wb);
    }
    else if(k < r+s)
    {
        // free modes of B: mode k of C corresponds to mode phib[k-r] of B
        assert(nc[k] == nb[phib[k-r]-1]);
        for(size_t ic = 0u; ic < nc[k]; b += wb[phib[k-r]-1], c += wc[k], ++ic)
            ttt(k+1, r, s, q, phia, phib, c, nc, wc, a, na, wa, b, nb, wb);
    }
    else if(k < r+s+q-1)
    {
        // contraction modes except the last: advance A and B together, C stays fixed
        assert(na[phia[k-s]-1] == nb[phib[k-r]-1]);
        for(size_t ia = 0u; ia < na[phia[k-s]-1]; a += wa[phia[k-s]-1], b += wb[phib[k-r]-1], ++ia)
            ttt(k+1, r, s, q, phia, phib, c, nc, wc, a, na, wa, b, nb, wb);
    }
    else
    {
        // last contraction mode: accumulate the element-wise products into *c
        assert(na[phia[k-s]-1] == nb[phib[k-r]-1]);
        for(size_t ia = 0u; ia < na[phia[k-s]-1]; a += wa[phia[k-s]-1], b += wb[phib[k-r]-1], ++ia)
            *c += *a * *b;
    }
}
/** @brief Computes the tensor-times-tensor product for q contraction modes
*
* Implements C[i1,...,ir,j1,...,js] = sum( A[i1,...,ir+q] * B[j1,...,js+q] )
*
* @note no permutation tuple is used
*
* nc[x] = na[x ] for 1 <= x <= r
* nc[r+x] = nb[x ] for 1 <= x <= s
* na[r+x] = nb[s+x] for 1 <= x <= q
*
* @note is used in function ttt
*
* @param k zero-based recursion level starting with 0
* @param r number of non-contraction indices of A
* @param s number of non-contraction indices of B
* @param q number of contraction indices with q > 0
* @param c pointer to the output tensor C with rank(A)=r+s
* @param nc pointer to the extents of tensor C
* @param wc pointer to the strides of tensor C
* @param a pointer to the first input tensor with rank(A)=r+q
* @param na pointer to the extents of the first input tensor A
* @param wa pointer to the strides of the first input tensor A
* @param b pointer to the second input tensor B with rank(B)=s+q
* @param nb pointer to the extents of the second input tensor B
* @param wb pointer to the strides of the second input tensor B
*/
template <class PointerOut, class PointerIn1, class PointerIn2, class SizeType>
void ttt(SizeType const k,
SizeType const r, SizeType const s, SizeType const q,
PointerOut c, SizeType const*const nc, SizeType const*const wc,
PointerIn1 a, SizeType const*const na, SizeType const*const wa,
PointerIn2 b, SizeType const*const nb, SizeType const*const wb)
{
if(k < r)
{
assert(nc[k] == na[k]);
for(size_t ic = 0u; ic < nc[k]; a += wa[k], c += wc[k], ++ic)
ttt(k+1, r, s, q, c, nc, wc, a, na, wa, b, nb, wb);
}
else if(k < r+s)
{
assert(nc[k] == nb[k-r]);
for(size_t ic = 0u; ic < nc[k]; b += wb[k-r], c += wc[k], ++ic)
ttt(k+1, r, s, q, c, nc, wc, a, na, wa, b, nb, wb);
}
else if(k < r+s+q-1)
{
assert(na[k-s] == nb[k-r]);
for(size_t ia = 0u; ia < na[k-s]; a += wa[k-s], b += wb[k-r], ++ia)
ttt(k+1, r, s, q, c, nc, wc, a, na, wa, b, nb, wb);
}
else
{
assert(na[k-s] == nb[k-r]);
for(size_t ia = 0u; ia < na[k-s]; a += wa[k-s], b += wb[k-r], ++ia)
*c += *a * *b;
}
}
/** @brief Computes the tensor-times-matrix product for the contraction mode m > 0
*
* Implements C[i1,i2,...,im-1,j,im+1,...,ip] = sum(A[i1,i2,...,im,...,ip] * B[j,im])
*
* @note is used in function ttm
*
* @param m zero-based contraction mode with 0<m<p
* @param r zero-based recursion level starting with p-1
* @param c pointer to the output tensor
* @param nc pointer to the extents of tensor c
* @param wc pointer to the strides of tensor c
* @param a pointer to the first input tensor
* @param na pointer to the extents of input tensor a
* @param wa pointer to the strides of input tensor a
* @param b pointer to the second input tensor
* @param nb pointer to the extents of input tensor b
* @param wb pointer to the strides of input tensor b
*/
template <class PointerOut, class PointerIn1, class PointerIn2, class SizeType>
void ttm(SizeType const m, SizeType const r,
PointerOut c, SizeType const*const nc, SizeType const*const wc,
PointerIn1 a, SizeType const*const na, SizeType const*const wa,
PointerIn2 b, SizeType const*const nb, SizeType const*const wb)
{
if(r == m) {
ttm(m, r-1, c, nc, wc, a, na, wa, b, nb, wb);
}
else if(r == 0){
for(auto i0 = 0ul; i0 < nc[0]; c += wc[0], a += wa[0], ++i0) {
auto cm = c;
auto b0 = b;
for(auto i0 = 0ul; i0 < nc[m]; cm += wc[m], b0 += wb[0], ++i0){
auto am = a;
auto b1 = b0;
for(auto i1 = 0ul; i1 < nb[1]; am += wa[m], b1 += wb[1], ++i1)
*cm += *am * *b1;
}
}
}
else{
for(auto i = 0ul; i < na[r]; c += wc[r], a += wa[r], ++i)
ttm(m, r-1, c, nc, wc, a, na, wa, b, nb, wb);
}
}
/** @brief Computes the tensor-times-matrix product for the contraction mode m = 0
*
* Implements C[j,i2,...,ip] = sum(A[i1,i2,...,ip] * B[j,i1])
*
* @note is used in function ttm
*
* @param m zero-based contraction mode with 0<m<p
* @param r zero-based recursion level starting with p-1
* @param c pointer to the output tensor
* @param nc pointer to the extents of tensor c
* @param wc pointer to the strides of tensor c
* @param a pointer to the first input tensor
* @param na pointer to the extents of input tensor a
* @param wa pointer to the strides of input tensor a
* @param b pointer to the second input tensor
* @param nb pointer to the extents of input tensor b
* @param wb pointer to the strides of input tensor b
*/
template <class PointerOut, class PointerIn1, class PointerIn2, class SizeType>
void ttm0( SizeType const r,
           PointerOut c, SizeType const*const nc, SizeType const*const wc,
           PointerIn1 a, SizeType const*const na, SizeType const*const wa,
           PointerIn2 b, SizeType const*const nb, SizeType const*const wb)
{
    if(r > 1){
        // recurse over all modes above 1; mode 0 is the contraction mode
        for(auto i = 0ul; i < na[r]; c += wc[r], a += wa[r], ++i)
            ttm0(r-1, c, nc, wc, a, na, wa, b, nb, wb);
    }
    else{
        // innermost level: C(j, i1, ...) = sum_k A(k, i1, ...) * B(j, k)
        // (bugfix: the innermost loop previously reused the outer loop
        //  variable name i1, shadowing it; renamed to ik)
        for(auto i1 = 0ul; i1 < nc[1]; c += wc[1], a += wa[1], ++i1) {
            auto cm = c;
            auto b0 = b;
            // r == m == 0
            for(auto i0 = 0ul; i0 < nc[0]; cm += wc[0], b0 += wb[0], ++i0){
                auto am = a;
                auto b1 = b0;
                for(auto ik = 0u; ik < nb[1]; am += wa[0], b1 += wb[1], ++ik){
                    *cm += *am * *b1;
                }
            }
        }
    }
}
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
/** @brief Computes the tensor-times-vector product for the contraction mode m > 0
*
* Implements C[i1,i2,...,im-1,im+1,...,ip] = sum(A[i1,i2,...,im,...,ip] * b[im])
*
* @note is used in function ttv
*
* @param m zero-based contraction mode with 0<m<p
* @param r zero-based recursion level starting with p-1 for tensor A
* @param q zero-based recursion level starting with p-1 for tensor C
* @param c pointer to the output tensor
* @param nc pointer to the extents of tensor c
* @param wc pointer to the strides of tensor c
* @param a pointer to the first input tensor
* @param na pointer to the extents of input tensor a
* @param wa pointer to the strides of input tensor a
* @param b pointer to the second input tensor
*/
template <class PointerOut, class PointerIn1, class PointerIn2, class SizeType>
void ttv( SizeType const m, SizeType const r, SizeType const q,
          PointerOut c, SizeType const*const nc, SizeType const*const wc,
          PointerIn1 a, SizeType const*const na, SizeType const*const wa,
          PointerIn2 b)
{
    if(r == m) {
        // skip the contraction mode; it is contracted by the innermost kernel
        ttv(m, r-1, q, c, nc, wc, a, na, wa, b);
    }
    else if(r == 0){
        // innermost level: iterate mode 0 of A and C together and
        // contract A's mode-m fiber with vector b
        for(auto i0 = 0u; i0 < na[0]; c += wc[0], a += wa[0], ++i0) {
            auto c1 = c; auto a1 = a; auto b1 = b;
            for(auto im = 0u; im < na[m]; a1 += wa[m], ++b1, ++im)
                *c1 += *a1 * *b1;
        }
    }
    else{
        // mode r of A corresponds to mode q of C (C has one mode fewer than A)
        for(auto i = 0u; i < na[r]; c += wc[q], a += wa[r], ++i)
            ttv(m, r-1, q-1, c, nc, wc, a, na, wa, b);
    }
}
/** @brief Computes the tensor-times-vector product for the contraction mode m = 0
*
* Implements C[i2,...,ip] = sum(A[i1,...,ip] * b[i1])
*
* @note is used in function ttv
*
* @param m zero-based contraction mode with m=0
* @param r zero-based recursion level starting with p-1
* @param c pointer to the output tensor
* @param nc pointer to the extents of tensor c
* @param wc pointer to the strides of tensor c
* @param a pointer to the first input tensor
* @param na pointer to the extents of input tensor a
* @param wa pointer to the strides of input tensor a
* @param b pointer to the second input tensor
*/
template <class PointerOut, class PointerIn1, class PointerIn2, class SizeType>
void ttv0(SizeType const r,
          PointerOut c, SizeType const*const nc, SizeType const*const wc,
          PointerIn1 a, SizeType const*const na, SizeType const*const wa,
          PointerIn2 b)
{
    if(r > 1){
        // mode r of A is mode r-1 of C, since mode 0 of A is contracted away
        for(auto i = 0u; i < na[r]; c += wc[r-1], a += wa[r], ++i)
            ttv0(r-1, c, nc, wc, a, na, wa, b);
    }
    else{
        // innermost level: iterate mode 1 and contract A's mode-0 fiber with b
        for(auto i1 = 0u; i1 < na[1]; c += wc[0], a += wa[1], ++i1)
        {
            auto c1 = c; auto a1 = a; auto b1 = b;
            for(auto i0 = 0u; i0 < na[0]; a1 += wa[0], ++b1, ++i0)
                *c1 += *a1 * *b1;
        }
    }
}
/** @brief Computes the matrix-times-vector product
*
* Implements C[i1] = sum(A[i1,i2] * b[i2]) or C[i2] = sum(A[i1,i2] * b[i1])
*
* @note is used in function ttv
*
* @param[in] m zero-based contraction mode with m=0 or m=1
* @param[out] c pointer to the output tensor C
* @param[in] nc pointer to the extents of tensor C
* @param[in] wc pointer to the strides of tensor C
* @param[in] a pointer to the first input tensor A
* @param[in] na pointer to the extents of input tensor A
* @param[in] wa pointer to the strides of input tensor A
* @param[in] b pointer to the second input tensor B
*/
template <class PointerOut, class PointerIn1, class PointerIn2, class SizeType>
void mtv(SizeType const m,
         PointerOut c, SizeType const*const , SizeType const*const wc,
         PointerIn1 a, SizeType const*const na, SizeType const*const wa,
         PointerIn2 b)
{
    // o is the free (non-contracted) mode: matrix*vector if m==1, vector*matrix if m==0
    const auto o = (m != 0) ? 0 : 1;
    for(auto i = 0u; i < na[o]; ++i, c += wc[o], a += wa[o]) {
        auto cf = c;
        auto af = a;
        auto bf = b;
        // contract the mode-m fiber of A with the vector b
        for(auto j = 0u; j < na[m]; ++j, af += wa[m], ++bf)
            *cf += *af * *bf;
    }
}
/** @brief Computes the matrix-times-matrix product
*
* Implements C[i1,i3] = sum(A[i1,i2] * B[i2,i3])
*
* @note is used in function ttm
*
* @param[out] c pointer to the output tensor C
* @param[in] nc pointer to the extents of tensor C
* @param[in] wc pointer to the strides of tensor C
* @param[in] a pointer to the first input tensor A
* @param[in] na pointer to the extents of input tensor A
* @param[in] wa pointer to the strides of input tensor A
* @param[in] b pointer to the second input tensor B
* @param[in] nb pointer to the extents of input tensor B
* @param[in] wb pointer to the strides of input tensor B
*/
template <class PointerOut, class PointerIn1, class PointerIn2, class SizeType>
void mtm(PointerOut c, SizeType const*const nc, SizeType const*const wc,
         PointerIn1 a, SizeType const*const na, SizeType const*const wa,
         PointerIn2 b, SizeType const*const nb, SizeType const*const wb)
{
    // C(i,j) = A(i,k) * B(k,j); extents of the three matrices must agree
    assert(nc[0] == na[0]);
    assert(nc[1] == nb[1]);
    assert(na[1] == nb[0]);

    auto c_col = c;
    auto b_col = b;
    // j: columns of C and B
    for(auto j = 0u; j < nc[1]; ++j, c_col += wc[1], b_col += wb[1]) {
        auto b_elem = b_col;
        auto a_col  = a;
        // k: contraction index (columns of A / rows of B)
        for(auto k = 0u; k < na[1]; ++k, a_col += wa[1], b_elem += wb[0]) {
            auto c_elem = c_col;
            auto a_elem = a_col;
            // i: rows of C and A
            for(auto i = 0u; i < na[0]; ++i, a_elem += wa[0], c_elem += wc[0])
                *c_elem += *a_elem * *b_elem;
        }
    }
}
/** @brief Computes the inner product of two tensors
*
* Implements c = sum(A[i1,i2,...,ip] * B[i1,i2,...,ip])
*
* @note is used in function inner
*
* @param r zero-based recursion level starting with p-1
* @param n pointer to the extents of input or output tensor
* @param a pointer to the first input tensor
* @param wa pointer to the strides of input tensor a
* @param b pointer to the second input tensor
* @param wb pointer to the strides of tensor b
* @param v previously computed value (start with v = 0).
* @return inner product of two tensors.
*/
template <class PointerIn1, class PointerIn2, class value_t, class SizeType>
value_t inner(SizeType const r, SizeType const*const n,
              PointerIn1 a, SizeType const*const wa,
              PointerIn2 b, SizeType const*const wb,
              value_t v)
{
    if(r == 0){
        // base case: accumulate along the innermost dimension
        for(auto i = 0u; i < n[0]; ++i, a += wa[0], b += wb[0])
            v += *a * *b;
        return v;
    }
    // recurse over dimension r, threading the accumulator through each call
    for(auto i = 0u; i < n[r]; ++i, a += wa[r], b += wb[r])
        v = inner(r-1, n, a, wa, b, wb, v);
    return v;
}
// Innermost kernel of the outer product: expands the last two modes of A and B.
// C[ia0,ia1, ..., ib0,ib1] = A[ia0,ia1] * B[ib0,ib1]; pa is the rank of A, so
// B's modes start at position pa in C's stride array wc.
template <class PointerOut, class PointerIn1, class PointerIn2, class SizeType>
void outer_2x2(SizeType const pa,
               PointerOut c, SizeType const*const , SizeType const*const wc,
               PointerIn1 a, SizeType const*const na, SizeType const*const wa,
               PointerIn2 b, SizeType const*const nb, SizeType const*const wb)
{
    // assert(rc == 3);
    // assert(ra == 1);
    // assert(rb == 1);
    for(auto ib1 = 0u; ib1 < nb[1]; b += wb[1], c += wc[pa+1], ++ib1) {
        auto c2 = c;
        auto b0 = b;
        for(auto ib0 = 0u; ib0 < nb[0]; b0 += wb[0], c2 += wc[pa], ++ib0) {
            // bugfix: was `const auto b = *b0;`, shadowing parameter b — renamed
            const auto vb = *b0;
            auto c1 = c2;
            auto a1 = a;
            for(auto ia1 = 0u; ia1 < na[1]; a1 += wa[1], c1 += wc[1], ++ia1) {
                auto a0 = a1;
                auto c0 = c1;
                // assignment (not accumulation): each C element is written exactly once
                for(SizeType ia0 = 0u; ia0 < na[0]; a0 += wa[0], c0 += wc[0], ++ia0)
                    *c0 = *a0 * vb;
            }
        }
    }
}
/** @brief Computes the outer product of two tensors
*
* Implements C[i1,...,ip,j1,...,jq] = A[i1,i2,...,ip] * B[j1,j2,...,jq]
*
* @note called by outer
*
*
* @param[in] pa number of dimensions (rank) of the first input tensor A with pa > 0
*
* @param[in] rc recursion level for C that starts with pc-1
* @param[out] c pointer to the output tensor
* @param[in] nc pointer to the extents of output tensor c
* @param[in] wc pointer to the strides of output tensor c
*
* @param[in] ra recursion level for A that starts with pa-1
* @param[in] a pointer to the first input tensor
* @param[in] na pointer to the extents of the first input tensor a
* @param[in] wa pointer to the strides of the first input tensor a
*
* @param[in] rb recursion level for B that starts with pb-1
* @param[in] b pointer to the second input tensor
* @param[in] nb pointer to the extents of the second input tensor b
* @param[in] wb pointer to the strides of the second input tensor b
*/
template<class PointerOut, class PointerIn1, class PointerIn2, class SizeType>
void outer(SizeType const pa,
           SizeType const rc, PointerOut c, SizeType const*const nc, SizeType const*const wc,
           SizeType const ra, PointerIn1 a, SizeType const*const na, SizeType const*const wa,
           SizeType const rb, PointerIn2 b, SizeType const*const nb, SizeType const*const wb)
{
    if(rb > 1)
        // iterate B's modes first; B's mode rb maps to C's mode rc (rc starts at pa+pb-1)
        for(auto ib = 0u; ib < nb[rb]; b += wb[rb], c += wc[rc], ++ib)
            outer(pa, rc-1, c, nc, wc, ra, a, na, wa, rb-1, b, nb, wb);
    else if(ra > 1)
        // then A's modes; A's mode ra maps directly to C's mode ra
        // (hence wc[ra], not wc[rc], for the output stride here)
        for(auto ia = 0u; ia < na[ra]; a += wa[ra], c += wc[ra], ++ia)
            outer(pa, rc-1, c, nc, wc, ra-1, a, na, wa, rb, b, nb, wb);
    else
        outer_2x2(pa, c, nc, wc, a, na, wa, b, nb, wb); //assert(ra==1 && rb==1 && rc==3);
}
/** @brief Computes the outer product with permutation tuples
*
* Implements C[i1,...,ir,j1,...,js] = sum( A[i1,...,ir] * B[j1,...,js] )
*
* nc[x] = na[phia[x]] for 1 <= x <= r
* nc[r+x] = nb[phib[x]] for 1 <= x <= s
*
* @note maybe called by ttt function
*
* @param k zero-based recursion level starting with 0
* @param r number of non-contraction indices of A
* @param s number of non-contraction indices of B
* @param phia pointer to the permutation tuple of length r for A
* @param phib pointer to the permutation tuple of length s for B
* @param c pointer to the output tensor C with rank(A)=r+s
* @param nc pointer to the extents of tensor C
* @param wc pointer to the strides of tensor C
* @param a pointer to the first input tensor with rank(A)=r
* @param na pointer to the extents of the first input tensor A
* @param wa pointer to the strides of the first input tensor A
* @param b pointer to the second input tensor B with rank(B)=s
* @param nb pointer to the extents of the second input tensor B
* @param wb pointer to the strides of the second input tensor B
*/
template <class PointerOut, class PointerIn1, class PointerIn2, class SizeType>
void outer(SizeType const k,
           SizeType const r, SizeType const s,
           SizeType const*const phia, SizeType const*const phib,
           PointerOut c, SizeType const*const nc, SizeType const*const wc,
           PointerIn1 a, SizeType const*const na, SizeType const*const wa,
           PointerIn2 b, SizeType const*const nb, SizeType const*const wb)
{
    // phia/phib are one-based permutation tuples, hence "-1" when indexing na/nb/wa/wb.
    if(k < r)
    {
        // modes of A: mode k of C corresponds to mode phia[k] of A
        assert(nc[k] == na[phia[k]-1]);
        for(size_t ic = 0u; ic < nc[k]; a += wa[phia[k]-1], c += wc[k], ++ic)
            outer(k+1, r, s, phia,phib, c, nc, wc, a, na, wa, b, nb, wb);
    }
    else if(k < r+s-1)
    {
        // modes of B except the last: mode k of C corresponds to mode phib[k-r] of B
        assert(nc[k] == nb[phib[k-r]-1]);
        for(size_t ic = 0u; ic < nc[k]; b += wb[phib[k-r]-1], c += wc[k], ++ic)
            outer(k+1, r, s, phia, phib, c, nc, wc, a, na, wa, b, nb, wb);
    }
    else
    {
        // last mode of B: write the products directly (no contraction in an outer product)
        assert(nc[k] == nb[phib[k-r]-1]);
        for(size_t ic = 0u; ic < nc[k]; b += wb[phib[k-r]-1], c += wc[k], ++ic)
            *c = *a * *b;
    }
}
} // namespace recursive
} // namespace detail
} // namespace ublas
} // namespace numeric
} // namespace boost
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
#include <algorithm>
#include <stdexcept>
#include <type_traits>
namespace boost {
namespace numeric {
namespace ublas {
/** @brief Computes the tensor-times-vector product
*
* Implements
* C[i1,i2,...,im-1,im+1,...,ip] = sum(A[i1,i2,...,im,...,ip] * b[im]) for m>1 and
* C[i2,...,ip] = sum(A[i1,...,ip] * b[i1]) for m=1
*
* @note calls detail::ttv, detail::ttv0 or detail::mtv
*
* @param[in] m contraction mode with 0 < m <= p
* @param[in] p number of dimensions (rank) of the first input tensor with p > 0
* @param[out] c pointer to the output tensor with rank p-1
* @param[in] nc pointer to the extents of tensor c
* @param[in] wc pointer to the strides of tensor c
* @param[in] a pointer to the first input tensor
* @param[in] na pointer to the extents of input tensor a
* @param[in] wa pointer to the strides of input tensor a
* @param[in] b pointer to the second input tensor
* @param[in] nb pointer to the extents of input tensor b
* @param[in] wb pointer to the strides of input tensor b
*/
template <class PointerOut, class PointerIn1, class PointerIn2, class SizeType>
void ttv(SizeType const m, SizeType const p,
         PointerOut c, SizeType const*const nc, SizeType const*const wc,
         const PointerIn1 a, SizeType const*const na, SizeType const*const wa,
         const PointerIn2 b, SizeType const*const nb, SizeType const*const wb)
{
    static_assert( std::is_pointer<PointerOut>::value & std::is_pointer<PointerIn1>::value & std::is_pointer<PointerIn2>::value,
        "Static error in boost::numeric::ublas::ttv: Argument types for pointers are not pointer types.");
    // m is one-based here; the detail functions below take a zero-based mode (m-1)
    if( m == 0)
        throw std::length_error("Error in boost::numeric::ublas::ttv: Contraction mode must be greater than zero.");
    if( p < m )
        throw std::length_error("Error in boost::numeric::ublas::ttv: Rank must be greater equal the modus.");
    if( p == 0)
        throw std::length_error("Error in boost::numeric::ublas::ttv: Rank must be greater than zero.");
    if(c == nullptr || a == nullptr || b == nullptr)
        throw std::length_error("Error in boost::numeric::ublas::ttv: Pointers shall not be null pointers.");
    // extents of A and C must agree on all modes except the contracted one;
    // C has one mode fewer than A, hence the index shift (i-1) above mode m
    for(auto i = 0u; i < m-1; ++i)
        if(na[i] != nc[i])
            throw std::length_error("Error in boost::numeric::ublas::ttv: Extents (except of dimension mode) of A and C must be equal.");
    for(auto i = m; i < p; ++i)
        if(na[i] != nc[i-1])
            throw std::length_error("Error in boost::numeric::ublas::ttv: Extents (except of dimension mode) of A and C must be equal.");
    // b is stored with two extents (e.g. n x 1 or 1 x n); its larger extent is its length
    const auto max = std::max(nb[0], nb[1]);
    if( na[m-1] != max)
        throw std::length_error("Error in boost::numeric::ublas::ttv: Extent of dimension mode of A and b must be equal.");
    // dispatch: general recursion, first-mode specialization, matrix-vector, or inner product
    if((m != 1) && (p > 2))
        detail::recursive::ttv(m-1, p-1, p-2, c, nc, wc, a, na, wa, b);
    else if ((m == 1) && (p > 2))
        detail::recursive::ttv0(p-1, c, nc, wc, a, na, wa, b);
    else if( p == 2 )
        detail::recursive::mtv(m-1, c, nc, wc, a, na, wa, b);
    else /*if( p == 1 )*/{
        // p == 1: A is a vector, so the result is a scalar inner product
        auto v = std::remove_pointer_t<std::remove_cv_t<PointerOut>>{};
        *c = detail::recursive::inner(SizeType(0), na, a, wa, b, wb, v);
    }
}
/** @brief Computes the tensor-times-matrix product
*
* Implements
* C[i1,i2,...,im-1,j,im+1,...,ip] = sum(A[i1,i2,...,im,...,ip] * B[j,im]) for m>1 and
* C[j,i2,...,ip] = sum(A[i1,i2,...,ip] * B[j,i1]) for m=1
*
* @note calls detail::ttm or detail::ttm0
*
* @param[in] m contraction mode with 0 < m <= p
* @param[in] p number of dimensions (rank) of the first input tensor with p > 0
* @param[out] c pointer to the output tensor with rank p-1
* @param[in] nc pointer to the extents of tensor c
* @param[in] wc pointer to the strides of tensor c
* @param[in] a pointer to the first input tensor
* @param[in] na pointer to the extents of input tensor a
* @param[in] wa pointer to the strides of input tensor a
* @param[in] b pointer to the second input tensor
* @param[in] nb pointer to the extents of input tensor b
* @param[in] wb pointer to the strides of input tensor b
*/
template <class PointerIn1, class PointerIn2, class PointerOut, class SizeType>
void ttm(SizeType const m, SizeType const p,
         PointerOut c, SizeType const*const nc, SizeType const*const wc,
         const PointerIn1 a, SizeType const*const na, SizeType const*const wa,
         const PointerIn2 b, SizeType const*const nb, SizeType const*const wb)
{
    // idiom fix: use logical && (was bitwise &) on the boolean traits
    static_assert( std::is_pointer<PointerOut>::value && std::is_pointer<PointerIn1>::value && std::is_pointer<PointerIn2>::value,
        "Static error in boost::numeric::ublas::ttm: Argument types for pointers are not pointer types.");
    // m is one-based here; the detail functions below take a zero-based mode (m-1)
    if( m == 0 )
        throw std::length_error("Error in boost::numeric::ublas::ttm: Contraction mode must be greater than zero.");
    if( p < m )
        throw std::length_error("Error in boost::numeric::ublas::ttm: Rank must be greater equal than the specified mode.");
    if( p == 0)
        // message fix: was "ttm:Rank" (missing space)
        throw std::length_error("Error in boost::numeric::ublas::ttm: Rank must be greater than zero.");
    if(c == nullptr || a == nullptr || b == nullptr)
        throw std::length_error("Error in boost::numeric::ublas::ttm: Pointers shall not be null pointers.");
    // extents of A and C must agree on every mode except the contraction mode m
    for(auto i = 0u; i < m-1; ++i)
        if(na[i] != nc[i])
            throw std::length_error("Error in boost::numeric::ublas::ttm: Extents (except of dimension mode) of A and C must be equal.");
    for(auto i = m; i < p; ++i)
        if(na[i] != nc[i])
            throw std::length_error("Error in boost::numeric::ublas::ttm: Extents (except of dimension mode) of A and C must be equal.");
    // B is a matrix B(j, i_m): its extents must match the contraction mode of A and C
    // message fixes: "must be the equal" -> "must be equal"; "1nd" -> "1st"
    if(na[m-1] != nb[1])
        throw std::length_error("Error in boost::numeric::ublas::ttm: 2nd extent of B and m-th extent of A must be equal.");
    if(nc[m-1] != nb[0])
        throw std::length_error("Error in boost::numeric::ublas::ttm: 1st extent of B and m-th extent of C must be equal.");
    // dispatch to the general kernel or the mode-1 specialization
    if ( m != 1 )
        detail::recursive::ttm (m-1, p-1, c, nc, wc, a, na, wa, b, nb, wb);
    else /*if (m == 1 && p > 2)*/
        detail::recursive::ttm0( p-1, c, nc, wc, a, na, wa, b, nb, wb);
}
/** @brief Computes the tensor-times-tensor product
*
* Implements C[i1,...,ir,j1,...,js] = sum( A[i1,...,ir+q] * B[j1,...,js+q] )
*
* @note calls detail::recursive::ttt or ttm or ttv or inner or outer
*
* nc[x] = na[phia[x] ] for 1 <= x <= r
* nc[r+x] = nb[phib[x] ] for 1 <= x <= s
* na[phia[r+x]] = nb[phib[s+x]] for 1 <= x <= q
*
* @param[in] pa number of dimensions (rank) of the first input tensor a with pa > 0
* @param[in] pb number of dimensions (rank) of the second input tensor b with pb > 0
* @param[in] q number of contraction dimensions with pa >= q and pb >= q and q >= 0
* @param[in] phia pointer to a permutation tuple for the first input tensor a
* @param[in] phib pointer to a permutation tuple for the second input tensor b
* @param[out] c pointer to the output tensor with rank p-1
* @param[in] nc pointer to the extents of tensor c
* @param[in] wc pointer to the strides of tensor c
* @param[in] a pointer to the first input tensor
* @param[in] na pointer to the extents of input tensor a
* @param[in] wa pointer to the strides of input tensor a
* @param[in] b pointer to the second input tensor
* @param[in] nb pointer to the extents of input tensor b
* @param[in] wb pointer to the strides of input tensor b
*/
template <class PointerIn1, class PointerIn2, class PointerOut, class SizeType>
void ttt(SizeType const pa, SizeType const pb, SizeType const q,
         SizeType const*const phia, SizeType const*const phib,
         PointerOut c, SizeType const*const nc, SizeType const*const wc,
         PointerIn1 a, SizeType const*const na, SizeType const*const wa,
         PointerIn2 b, SizeType const*const nb, SizeType const*const wb)
{
    // message fix: error text said "ttm" inside ttt; idiom fix: && instead of bitwise &
    static_assert( std::is_pointer<PointerOut>::value && std::is_pointer<PointerIn1>::value && std::is_pointer<PointerIn2>::value,
        "Static error in boost::numeric::ublas::ttt: Argument types for pointers are not pointer types.");
    if( pa == 0 || pb == 0)
        throw std::length_error("Error in boost::numeric::ublas::ttt: tensor order must be greater zero.");
    // bugfix: was `q > pa && q > pb`, which only threw when q exceeded BOTH orders,
    // letting the unsigned subtractions below underflow; the contract requires
    // pa >= q AND pb >= q, so reject when q exceeds EITHER order.
    if( q > pa || q > pb)
        throw std::length_error("Error in boost::numeric::ublas::ttt: number of contraction must be smaller than or equal to the tensor order.");
    SizeType const r = pa - q; // number of non-contracted modes of A
    SizeType const s = pb - q; // number of non-contracted modes of B
    if(c == nullptr || a == nullptr || b == nullptr)
        throw std::length_error("Error in boost::numeric::ublas::ttt: Pointers shall not be null pointers.");
    // permuted extents of A and B must match the extents of C and each other
    for(auto i = 0ul; i < r; ++i)
        if( na[phia[i]-1] != nc[i] )
            throw std::length_error("Error in boost::numeric::ublas::ttt: dimensions of lhs and res tensor not correct.");
    for(auto i = 0ul; i < s; ++i)
        if( nb[phib[i]-1] != nc[r+i] )
            throw std::length_error("Error in boost::numeric::ublas::ttt: dimensions of rhs and res not correct.");
    for(auto i = 0ul; i < q; ++i)
        if( nb[phib[s+i]-1] != na[phia[r+i]-1] )
            throw std::length_error("Error in boost::numeric::ublas::ttt: dimensions of lhs and rhs not correct.");
    // q == 0 degenerates to an outer product
    if(q == 0ul)
        detail::recursive::outer(SizeType{0},r,s, phia,phib, c,nc,wc, a,na,wa, b,nb,wb);
    else
        detail::recursive::ttt(SizeType{0},r,s,q, phia,phib, c,nc,wc, a,na,wa, b,nb,wb);
}
/** @brief Computes the tensor-times-tensor product
*
* Implements C[i1,...,ir,j1,...,js] = sum( A[i1,...,ir+q] * B[j1,...,js+q] )
*
* @note calls detail::recursive::ttt or ttm or ttv or inner or outer
*
* nc[x] = na[x ] for 1 <= x <= r
* nc[r+x] = nb[x ] for 1 <= x <= s
* na[r+x] = nb[s+x] for 1 <= x <= q
*
* @param[in] pa number of dimensions (rank) of the first input tensor a with pa > 0
* @param[in] pb number of dimensions (rank) of the second input tensor b with pb > 0
* @param[in] q number of contraction dimensions with pa >= q and pb >= q and q >= 0
* @param[out] c pointer to the output tensor with rank p-1
* @param[in] nc pointer to the extents of tensor c
* @param[in] wc pointer to the strides of tensor c
* @param[in] a pointer to the first input tensor
* @param[in] na pointer to the extents of input tensor a
* @param[in] wa pointer to the strides of input tensor a
* @param[in] b pointer to the second input tensor
* @param[in] nb pointer to the extents of input tensor b
* @param[in] wb pointer to the strides of input tensor b
*/
template <class PointerIn1, class PointerIn2, class PointerOut, class SizeType>
void ttt(SizeType const pa, SizeType const pb, SizeType const q,
         PointerOut c, SizeType const*const nc, SizeType const*const wc,
         PointerIn1 a, SizeType const*const na, SizeType const*const wa,
         PointerIn2 b, SizeType const*const nb, SizeType const*const wb)
{
    // message fix: error text said "ttm" inside ttt; idiom fix: && instead of bitwise &
    static_assert( std::is_pointer<PointerOut>::value && std::is_pointer<PointerIn1>::value && std::is_pointer<PointerIn2>::value,
        "Static error in boost::numeric::ublas::ttt: Argument types for pointers are not pointer types.");
    if( pa == 0 || pb == 0)
        throw std::length_error("Error in boost::numeric::ublas::ttt: tensor order must be greater zero.");
    // bugfix: was `q > pa && q > pb`, which only threw when q exceeded BOTH orders,
    // letting the unsigned subtractions below underflow; the contract requires
    // pa >= q AND pb >= q, so reject when q exceeds EITHER order.
    if( q > pa || q > pb)
        throw std::length_error("Error in boost::numeric::ublas::ttt: number of contraction must be smaller than or equal to the tensor order.");
    SizeType const r  = pa - q; // number of non-contracted modes of A
    SizeType const s  = pb - q; // number of non-contracted modes of B
    SizeType const pc = r+s;    // rank of the output tensor C
    if(c == nullptr || a == nullptr || b == nullptr)
        throw std::length_error("Error in boost::numeric::ublas::ttt: Pointers shall not be null pointers.");
    // leading extents of A and B must match the extents of C; trailing ones each other
    for(auto i = 0ul; i < r; ++i)
        if( na[i] != nc[i] )
            throw std::length_error("Error in boost::numeric::ublas::ttt: dimensions of lhs and res tensor not correct.");
    for(auto i = 0ul; i < s; ++i)
        if( nb[i] != nc[r+i] )
            throw std::length_error("Error in boost::numeric::ublas::ttt: dimensions of rhs and res not correct.");
    for(auto i = 0ul; i < q; ++i)
        if( nb[s+i] != na[r+i] )
            throw std::length_error("Error in boost::numeric::ublas::ttt: dimensions of lhs and rhs not correct.");
    using value_type = std::decay_t<decltype(*c)>;
    // dispatch: outer product (q==0), scalar inner product (all modes contracted),
    // or the general recursive contraction kernel
    if(q == 0ul)
        detail::recursive::outer(pa, pc-1, c,nc,wc, pa-1, a,na,wa, pb-1, b,nb,wb);
    else if(r == 0ul && s == 0ul)
        *c = detail::recursive::inner(q-1, na, a,wa, b,wb, value_type(0) );
    else
        detail::recursive::ttt(SizeType{0},r,s,q, c,nc,wc, a,na,wa, b,nb,wb);
}
/** @brief Computes the inner product of two tensors
*
* Implements c = sum(A[i1,i2,...,ip] * B[i1,i2,...,ip])
*
* @note calls detail::inner
*
* @param[in] p number of dimensions (rank) of the first input tensor with p > 0
* @param[in] n pointer to the extents of input or output tensor
* @param[in] a pointer to the first input tensor
* @param[in] wa pointer to the strides of input tensor a
* @param[in] b pointer to the second input tensor
* @param[in] wb pointer to the strides of input tensor b
* @param[in] v inital value
*
* @return inner product of two tensors.
*/
template <class PointerIn1, class PointerIn2, class value_t, class SizeType>
auto inner(const SizeType p, SizeType const*const n,
           const PointerIn1 a, SizeType const*const wa,
           const PointerIn2 b, SizeType const*const wb,
           value_t v)
{
    static_assert( std::is_pointer<PointerIn1>::value && std::is_pointer<PointerIn2>::value,
        "Static error in boost::numeric::ublas::inner: Argument types for pointers must be pointer types.");
    // message fix: the condition rejects p < 2, but the old message claimed
    // "greater than zero"; report the requirement the code actually enforces.
    // NOTE(review): the doxygen above says p > 0 — confirm whether p == 1 should be allowed.
    if(p<2)
        throw std::length_error("Error in boost::numeric::ublas::inner: Rank must be greater than one.");
    if(a == nullptr || b == nullptr)
        throw std::length_error("Error in boost::numeric::ublas::inner: Pointers shall not be null pointers.");
    // recursion starts at the outermost dimension p-1 with initial value v
    return detail::recursive::inner(p-1, n, a, wa, b, wb, v);
}
/** @brief Computes the outer product of two tensors
 *
 * Implements C[i1,...,ip,j1,...,jq] = A[i1,i2,...,ip] * B[j1,j2,...,jq]
 *
 * @note calls detail::recursive::outer
 *
 * @param[out] c pointer to the output tensor
 * @param[in] pc number of dimensions (rank) of the output tensor c with pc = pa+pb
 * @param[in] nc pointer to the extents of output tensor c
 * @param[in] wc pointer to the strides of output tensor c
 * @param[in] a pointer to the first input tensor
 * @param[in] pa number of dimensions (rank) of the first input tensor a with pa >= 2
 * @param[in] na pointer to the extents of the first input tensor a
 * @param[in] wa pointer to the strides of the first input tensor a
 * @param[in] b pointer to the second input tensor
 * @param[in] pb number of dimensions (rank) of the second input tensor b with pb >= 2
 * @param[in] nb pointer to the extents of the second input tensor b
 * @param[in] wb pointer to the strides of the second input tensor b
 */
template <class PointerOut, class PointerIn1, class PointerIn2, class SizeType>
void outer(PointerOut c,       SizeType const pc, SizeType const*const nc, SizeType const*const wc,
           const PointerIn1 a, SizeType const pa, SizeType const*const na, SizeType const*const wa,
           const PointerIn2 b, SizeType const pb, SizeType const*const nb, SizeType const*const wb)
{
	// Logical && (not bitwise &) on the boolean traits.
	static_assert( std::is_pointer<PointerIn1>::value && std::is_pointer<PointerIn2>::value && std::is_pointer<PointerOut>::value,
	               "Static error in boost::numeric::ublas::outer: argument types for pointers must be pointer types.");

	if(pa < 2u || pb < 2u)
		throw std::length_error("Error in boost::numeric::ublas::outer: number of extents of lhs and rhs tensor must be equal or greater than two.");
	if((pa + pb) != pc)
		throw std::length_error("Error in boost::numeric::ublas::outer: number of extents of lhs plus rhs tensor must be equal to the number of extents of C.");
	if(a == nullptr || b == nullptr || c == nullptr)
		throw std::length_error("Error in boost::numeric::ublas::outer: pointers shall not be null pointers.");
	// Extent and stride arrays are dereferenced by the recursive kernel;
	// check them here, consistent with boost::numeric::ublas::copy.
	if(na == nullptr || wa == nullptr || nb == nullptr || wb == nullptr || nc == nullptr || wc == nullptr)
		throw std::length_error("Error in boost::numeric::ublas::outer: pointers shall not be null pointers.");

	detail::recursive::outer(pa, pc-1, c, nc, wc,  pa-1, a, na, wa,  pb-1, b, nb, wb);
}
}
}
}
#endif

View File

@@ -0,0 +1,244 @@
//
// Copyright (c) 2018-2019, Cem Bassoy, cem.bassoy@gmail.com
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// The authors gratefully acknowledge the support of
// Fraunhofer IOSB, Ettlingen, Germany
//
#ifndef BOOST_UBLAS_TENSOR_OPERATORS_ARITHMETIC_HPP
#define BOOST_UBLAS_TENSOR_OPERATORS_ARITHMETIC_HPP
#include "expression.hpp"
#include "expression_evaluation.hpp"
#include "multi_index_utility.hpp"
#include "functions.hpp"
#include <type_traits>
#include <functional>
#include <algorithm>
namespace boost{
namespace numeric{
namespace ublas {
template<class element_type, class storage_format, class storage_type>
class tensor;
template<class E>
class matrix_expression;
template<class E>
class vector_expression;
}
}
}
#define FIRST_ORDER_OPERATOR_RIGHT(OP, EXPR_TYPE_L, EXPR_TYPE_R) \
template<class T, class L, class R> \
auto operator OP ( boost::numeric::ublas:: EXPR_TYPE_L <T,L> const& lhs, boost::numeric::ublas:: EXPR_TYPE_R <R> const& rhs) { \
return boost::numeric::ublas::detail::make_binary_tensor_expression<T> (lhs(), rhs(), \
[](auto const& l, auto const& r){ return l OP r; }); \
} \
FIRST_ORDER_OPERATOR_RIGHT (*, detail:: tensor_expression , vector_expression)
FIRST_ORDER_OPERATOR_RIGHT (+, detail:: tensor_expression , vector_expression)
FIRST_ORDER_OPERATOR_RIGHT (-, detail:: tensor_expression , vector_expression)
FIRST_ORDER_OPERATOR_RIGHT (/, detail:: tensor_expression , vector_expression)
FIRST_ORDER_OPERATOR_RIGHT (*, detail:: tensor_expression , matrix_expression)
FIRST_ORDER_OPERATOR_RIGHT (+, detail:: tensor_expression , matrix_expression)
FIRST_ORDER_OPERATOR_RIGHT (-, detail:: tensor_expression , matrix_expression)
FIRST_ORDER_OPERATOR_RIGHT (/, detail:: tensor_expression , matrix_expression)
#define FIRST_ORDER_OPERATOR_LEFT(OP, EXPR_TYPE_L, EXPR_TYPE_R) \
template<class T, class L, class R> \
auto operator OP ( boost::numeric::ublas:: EXPR_TYPE_L <L> const& lhs, boost::numeric::ublas:: EXPR_TYPE_R <T,R> const& rhs) { \
return boost::numeric::ublas::detail::make_binary_tensor_expression<T> (lhs(), rhs(), \
[](auto const& l, auto const& r){ return l OP r; }); \
} \
FIRST_ORDER_OPERATOR_LEFT (*, vector_expression, detail:: tensor_expression)
FIRST_ORDER_OPERATOR_LEFT (+, vector_expression, detail:: tensor_expression)
FIRST_ORDER_OPERATOR_LEFT (-, vector_expression, detail:: tensor_expression)
FIRST_ORDER_OPERATOR_LEFT (/, vector_expression, detail:: tensor_expression)
FIRST_ORDER_OPERATOR_LEFT (*, matrix_expression, detail:: tensor_expression)
FIRST_ORDER_OPERATOR_LEFT (+, matrix_expression, detail:: tensor_expression)
FIRST_ORDER_OPERATOR_LEFT (-, matrix_expression, detail:: tensor_expression)
FIRST_ORDER_OPERATOR_LEFT (/, matrix_expression, detail:: tensor_expression)
/// Lazy elementwise sum of two tensor expressions over the same tensor type T.
template<class T, class L, class R>
auto operator+( boost::numeric::ublas::detail::tensor_expression<T,L> const& lhs, boost::numeric::ublas::detail::tensor_expression<T,R> const& rhs) {
	namespace ubd = boost::numeric::ublas::detail;
	return ubd::make_binary_tensor_expression<T>( lhs(), rhs(), [](auto const& le, auto const& re){ return le + re; } );
}

/// Lazy elementwise difference of two tensor expressions.
template<class T, class L, class R>
auto operator-( boost::numeric::ublas::detail::tensor_expression<T,L> const& lhs, boost::numeric::ublas::detail::tensor_expression<T,R> const& rhs) {
	namespace ubd = boost::numeric::ublas::detail;
	return ubd::make_binary_tensor_expression<T>( lhs(), rhs(), [](auto const& le, auto const& re){ return le - re; } );
}

/// Lazy elementwise product of two tensor expressions.
template<class T, class L, class R>
auto operator*( boost::numeric::ublas::detail::tensor_expression<T,L> const& lhs, boost::numeric::ublas::detail::tensor_expression<T,R> const& rhs) {
	namespace ubd = boost::numeric::ublas::detail;
	return ubd::make_binary_tensor_expression<T>( lhs(), rhs(), [](auto const& le, auto const& re){ return le * re; } );
}

/// Lazy elementwise quotient of two tensor expressions.
template<class T, class L, class R>
auto operator/( boost::numeric::ublas::detail::tensor_expression<T,L> const& lhs, boost::numeric::ublas::detail::tensor_expression<T,R> const& rhs) {
	namespace ubd = boost::numeric::ublas::detail;
	return ubd::make_binary_tensor_expression<T>( lhs(), rhs(), [](auto const& le, auto const& re){ return le / re; } );
}
// Overloaded Arithmetic Operators with Scalars
template<class T, class R>
auto operator+(typename T::const_reference lhs, boost::numeric::ublas::detail::tensor_expression<T,R> const& rhs) {
return boost::numeric::ublas::detail::make_unary_tensor_expression<T> (rhs(), [lhs](auto const& r){ return lhs + r; });
//return boost::numeric::ublas::detail::make_lambda<T>( [&lhs,&rhs](std::size_t i) {return lhs + rhs(i); } );
}
template<class T, class R>
auto operator-(typename T::const_reference lhs, boost::numeric::ublas::detail::tensor_expression<T,R> const& rhs) {
return boost::numeric::ublas::detail::make_unary_tensor_expression<T> (rhs(), [lhs](auto const& r){ return lhs - r; });
}
template<class T, class R>
auto operator*(typename T::const_reference lhs, boost::numeric::ublas::detail::tensor_expression<T,R> const& rhs) {
return boost::numeric::ublas::detail::make_unary_tensor_expression<T> (rhs(), [lhs](auto const& r){ return lhs * r; });
}
template<class T, class R>
auto operator/(typename T::const_reference lhs, boost::numeric::ublas::detail::tensor_expression<T,R> const& rhs) {
return boost::numeric::ublas::detail::make_unary_tensor_expression<T> (rhs(), [lhs](auto const& r){ return lhs / r; });
}
// Scalar on the right-hand side: the scalar is applied from the right
// to every element of the expression, again lazily.
template<class T, class L>
auto operator+(boost::numeric::ublas::detail::tensor_expression<T,L> const& lhs, typename T::const_reference rhs) {
	namespace ubd = boost::numeric::ublas::detail;
	return ubd::make_unary_tensor_expression<T>( lhs(), [rhs](auto const& elem){ return elem + rhs; } );
}

template<class T, class L>
auto operator-(boost::numeric::ublas::detail::tensor_expression<T,L> const& lhs, typename T::const_reference rhs) {
	namespace ubd = boost::numeric::ublas::detail;
	return ubd::make_unary_tensor_expression<T>( lhs(), [rhs](auto const& elem){ return elem - rhs; } );
}

template<class T, class L>
auto operator*(boost::numeric::ublas::detail::tensor_expression<T,L> const& lhs, typename T::const_reference rhs) {
	namespace ubd = boost::numeric::ublas::detail;
	return ubd::make_unary_tensor_expression<T>( lhs(), [rhs](auto const& elem){ return elem * rhs; } );
}

template<class T, class L>
auto operator/(boost::numeric::ublas::detail::tensor_expression<T,L> const& lhs, typename T::const_reference rhs) {
	namespace ubd = boost::numeric::ublas::detail;
	return ubd::make_unary_tensor_expression<T>( lhs(), [rhs](auto const& elem){ return elem / rhs; } );
}
/// In-place elementwise addition: evaluates expr into lhs via l += r.
template<class T, class D>
auto& operator += (T& lhs, const boost::numeric::ublas::detail::tensor_expression<T,D> &expr) {
	namespace ubd = boost::numeric::ublas::detail;
	ubd::eval(lhs, expr(), [](auto& le, auto const& re) { le += re; });
	return lhs;
}

/// In-place elementwise subtraction of an evaluated tensor expression.
template<class T, class D>
auto& operator -= (T& lhs, const boost::numeric::ublas::detail::tensor_expression<T,D> &expr) {
	namespace ubd = boost::numeric::ublas::detail;
	ubd::eval(lhs, expr(), [](auto& le, auto const& re) { le -= re; });
	return lhs;
}

/// In-place elementwise multiplication by an evaluated tensor expression.
template<class T, class D>
auto& operator *= (T& lhs, const boost::numeric::ublas::detail::tensor_expression<T,D> &expr) {
	namespace ubd = boost::numeric::ublas::detail;
	ubd::eval(lhs, expr(), [](auto& le, auto const& re) { le *= re; });
	return lhs;
}

/// In-place elementwise division by an evaluated tensor expression.
template<class T, class D>
auto& operator /= (T& lhs, const boost::numeric::ublas::detail::tensor_expression<T,D> &expr) {
	namespace ubd = boost::numeric::ublas::detail;
	ubd::eval(lhs, expr(), [](auto& le, auto const& re) { le /= re; });
	return lhs;
}
/// In-place addition of a scalar to every element of the tensor.
template<class E, class F, class A>
auto& operator += (boost::numeric::ublas::tensor<E,F,A>& lhs, typename boost::numeric::ublas::tensor<E,F,A>::const_reference r) {
	namespace ubd = boost::numeric::ublas::detail;
	ubd::eval(lhs, [r](auto& le) { le += r; });
	return lhs;
}

/// In-place subtraction of a scalar from every element of the tensor.
template<class E, class F, class A>
auto& operator -= (boost::numeric::ublas::tensor<E,F,A>& lhs, typename boost::numeric::ublas::tensor<E,F,A>::const_reference r) {
	namespace ubd = boost::numeric::ublas::detail;
	ubd::eval(lhs, [r](auto& le) { le -= r; });
	return lhs;
}

/// In-place multiplication of every element of the tensor by a scalar.
template<class E, class F, class A>
auto& operator *= (boost::numeric::ublas::tensor<E,F,A>& lhs, typename boost::numeric::ublas::tensor<E,F,A>::const_reference r) {
	namespace ubd = boost::numeric::ublas::detail;
	ubd::eval(lhs, [r](auto& le) { le *= r; });
	return lhs;
}

/// In-place division of every element of the tensor by a scalar.
template<class E, class F, class A>
auto& operator /= (boost::numeric::ublas::tensor<E,F,A>& lhs, typename boost::numeric::ublas::tensor<E,F,A>::const_reference r) {
	namespace ubd = boost::numeric::ublas::detail;
	ubd::eval(lhs, [r](auto& le) { le /= r; });
	return lhs;
}
/// Unary plus: identity — returns the expression unchanged.
template<class T, class D>
auto const& operator +(const boost::numeric::ublas::detail::tensor_expression<T,D>& lhs) {
	return lhs;
}

/// Unary minus: lazily negates every element of the expression.
template<class T, class D>
auto operator -(boost::numeric::ublas::detail::tensor_expression<T,D> const& lhs) {
	namespace ubd = boost::numeric::ublas::detail;
	return ubd::make_unary_tensor_expression<T>( lhs(), [](auto const& elem){ return -elem; } );
}
/** @brief Performs a tensor contraction, not an elementwise multiplication
 *
 * Both operands are (tensor, multi-index) pairs as produced by indexing a
 * tensor with Einstein-style index objects. Dispatch happens at compile
 * time on the number of indices the two index tuples share:
 *  - no shared index: delegate to the tensors' own operator*
 *  - all indices shared and tuples identical: full inner product
 *  - otherwise: contraction over the matching index positions via prod()
 *
 * @param lhs pair of (lhs tensor reference, its multi-index tuple)
 * @param rhs pair of (rhs tensor reference, its multi-index tuple)
 * @return the contracted tensor (or scalar inner product) — type depends on
 *         the compile-time branch taken
 */
template<class tensor_type_left, class tuple_type_left, class tensor_type_right, class tuple_type_right>
auto operator*(
	std::pair< tensor_type_left const&, tuple_type_left > lhs,
	std::pair< tensor_type_right const&, tuple_type_right > rhs)
{
	using namespace boost::numeric::ublas;

	auto const& tensor_left  = lhs.first;
	auto const& tensor_right = rhs.first;

	auto multi_index_left = lhs.second;
	auto multi_index_right = rhs.second;

	// Number of indices occurring in both tuples, computed at compile time.
	static constexpr auto num_equal_ind = number_equal_indexes<tuple_type_left, tuple_type_right>::value;

	if constexpr ( num_equal_ind == 0 ){
		// No contraction index in common: fall back to the tensors' operator*.
		return tensor_left * tensor_right;
	}
	else if constexpr ( num_equal_ind==std::tuple_size<tuple_type_left>::value && std::is_same<tuple_type_left, tuple_type_right>::value ){
		// Every index matches in the same order: full inner product.
		return boost::numeric::ublas::inner_prod( tensor_left, tensor_right );
	}
	else {
		// Partial match: the positions of the shared indices in each tuple
		// become the contraction modes handed to prod().
		auto array_index_pairs = index_position_pairs(multi_index_left,multi_index_right);
		auto index_pairs = array_to_vector( array_index_pairs );
		return boost::numeric::ublas::prod( tensor_left, tensor_right, index_pairs.first, index_pairs.second );
	}
}
#endif

View File

@@ -0,0 +1,175 @@
//
// Copyright (c) 2018-2019, Cem Bassoy, cem.bassoy@gmail.com
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// The authors gratefully acknowledge the support of
// Fraunhofer IOSB, Ettlingen, Germany
//
#ifndef BOOST_UBLAS_TENSOR_OPERATORS_COMPARISON_HPP
#define BOOST_UBLAS_TENSOR_OPERATORS_COMPARISON_HPP
#include <boost/numeric/ublas/tensor/expression.hpp>
#include <boost/numeric/ublas/tensor/expression_evaluation.hpp>
#include <type_traits>
#include <functional>
namespace boost::numeric::ublas {
template<class element_type, class storage_format, class storage_type>
class tensor;
}
namespace boost::numeric::ublas::detail {
/** @brief Elementwise comparison of two tensors of identical type.
 *
 * Returns true iff pred holds for every pair of corresponding elements.
 * Shape mismatch yields false for (in)equality predicates and throws
 * std::runtime_error for every other predicate. Empty tensors compare
 * false under the strict ordering predicates std::greater/std::less.
 */
template<class T, class F, class A, class BinaryPred>
bool compare(tensor<T,F,A> const& lhs, tensor<T,F,A> const& rhs, BinaryPred pred)
{
	constexpr bool is_equality_pred =
		std::is_same<BinaryPred,std::equal_to<>>::value ||
		std::is_same<BinaryPred,std::not_equal_to<>>::value;

	if(lhs.extents() != rhs.extents()){
		if constexpr(is_equality_pred)
			return false;
		else
			throw std::runtime_error("Error in boost::numeric::ublas::detail::compare: cannot compare tensors with different shapes.");
	}

	// A strict ordering over zero elements is defined as false.
	if constexpr(std::is_same<BinaryPred,std::greater<>>::value || std::is_same<BinaryPred,std::less<>>::value)
		if(lhs.empty())
			return false;

	for(auto i = 0u; i < lhs.size(); ++i)
		if(!pred(lhs(i), rhs(i)))
			return false;
	return true;
}

/** @brief Returns true iff the unary predicate holds for every element of rhs. */
template<class T, class F, class A, class UnaryPred>
bool compare(tensor<T,F,A> const& rhs, UnaryPred pred)
{
	const auto num_elements = rhs.size();
	for(auto i = 0u; i < num_elements; ++i)
		if(!pred(rhs(i)))
			return false;
	return true;
}
/** @brief Compares two tensor expressions, materializing non-tensor operands.
 *
 * An operand whose expression type equals the tensor type T is already a
 * tensor and is used directly; any other expression is first evaluated into
 * a temporary tensor T before the elementwise comparison.
 */
template<class T, class L, class R, class BinaryPred>
bool compare(tensor_expression<T,L> const& lhs, tensor_expression<T,R> const& rhs, BinaryPred pred)
{
	if constexpr (std::is_same<T,L>::value) {
		if constexpr (std::is_same<T,R>::value)
			return compare(static_cast<T const&>( lhs ), static_cast<T const&>( rhs ), pred);
		else
			return compare(static_cast<T const&>( lhs ), T( rhs ), pred);
	}
	else {
		if constexpr (std::is_same<T,R>::value)
			return compare(T( lhs ), static_cast<T const&>( rhs ), pred);
		else
			return compare(T( lhs ), T( rhs ), pred);
	}
}

/** @brief Applies a unary predicate to every element of a tensor expression,
 *         evaluating the expression into a tensor first if necessary. */
template<class T, class D, class UnaryPred>
bool compare(tensor_expression<T,D> const& expr, UnaryPred pred)
{
	if constexpr (std::is_same<T,D>::value)
		return compare(static_cast<T const&>( expr ), pred);
	else
		return compare(T( expr ), pred);
}
}
/// True iff both expressions have the same shape and all elements compare equal.
template<class T, class L, class R>
bool operator==( boost::numeric::ublas::detail::tensor_expression<T,L> const& lhs,
                 boost::numeric::ublas::detail::tensor_expression<T,R> const& rhs) {
	return boost::numeric::ublas::detail::compare( lhs, rhs, std::equal_to<>{} );
}

/// True iff ALL corresponding elements differ (elementwise forall semantics,
/// NOT the logical negation of operator==); shape mismatch yields false.
template<class T, class L, class R>
bool operator!=(boost::numeric::ublas::detail::tensor_expression<T,L> const& lhs,
                boost::numeric::ublas::detail::tensor_expression<T,R> const& rhs) {
	return boost::numeric::ublas::detail::compare( lhs, rhs, std::not_equal_to<>{} );
}

/// True iff every element of lhs is less than its counterpart; throws on shape mismatch.
template<class T, class L, class R>
bool operator< ( boost::numeric::ublas::detail::tensor_expression<T,L> const& lhs,
                 boost::numeric::ublas::detail::tensor_expression<T,R> const& rhs) {
	return boost::numeric::ublas::detail::compare( lhs, rhs, std::less<>{} );
}

/// True iff every element of lhs is less than or equal to its counterpart; throws on shape mismatch.
template<class T, class L, class R>
bool operator<=( boost::numeric::ublas::detail::tensor_expression<T,L> const& lhs,
                 boost::numeric::ublas::detail::tensor_expression<T,R> const& rhs) {
	return boost::numeric::ublas::detail::compare( lhs, rhs, std::less_equal<>{} );
}

/// True iff every element of lhs is greater than its counterpart; throws on shape mismatch.
template<class T, class L, class R>
bool operator> ( boost::numeric::ublas::detail::tensor_expression<T,L> const& lhs,
                 boost::numeric::ublas::detail::tensor_expression<T,R> const& rhs) {
	return boost::numeric::ublas::detail::compare( lhs, rhs, std::greater<>{} );
}

/// True iff every element of lhs is greater than or equal to its counterpart; throws on shape mismatch.
template<class T, class L, class R>
bool operator>=( boost::numeric::ublas::detail::tensor_expression<T,L> const& lhs,
                 boost::numeric::ublas::detail::tensor_expression<T,R> const& rhs) {
	return boost::numeric::ublas::detail::compare( lhs, rhs, std::greater_equal<>{} );
}
// Scalar-vs-expression comparisons (scalar on the left). Each returns true
// iff the relation holds between the scalar and EVERY element of the
// expression (forall semantics). Declared return types unified to bool for
// consistency with operator== above.
template<class T, class D>
bool operator==( typename T::const_reference lhs, boost::numeric::ublas::detail::tensor_expression<T,D> const& rhs) {
	return boost::numeric::ublas::detail::compare( rhs, [lhs](auto const& r){ return lhs == r; } );
}

template<class T, class D>
bool operator!=( typename T::const_reference lhs, boost::numeric::ublas::detail::tensor_expression<T,D> const& rhs) {
	return boost::numeric::ublas::detail::compare( rhs, [lhs](auto const& r){ return lhs != r; } );
}

template<class T, class D>
bool operator< ( typename T::const_reference lhs, boost::numeric::ublas::detail::tensor_expression<T,D> const& rhs) {
	return boost::numeric::ublas::detail::compare( rhs, [lhs](auto const& r){ return lhs <  r; } );
}

template<class T, class D>
bool operator<=( typename T::const_reference lhs, boost::numeric::ublas::detail::tensor_expression<T,D> const& rhs) {
	return boost::numeric::ublas::detail::compare( rhs, [lhs](auto const& r){ return lhs <= r; } );
}

template<class T, class D>
bool operator> ( typename T::const_reference lhs, boost::numeric::ublas::detail::tensor_expression<T,D> const& rhs) {
	return boost::numeric::ublas::detail::compare( rhs, [lhs](auto const& r){ return lhs >  r; } );
}

template<class T, class D>
bool operator>=( typename T::const_reference lhs, boost::numeric::ublas::detail::tensor_expression<T,D> const& rhs) {
	return boost::numeric::ublas::detail::compare( rhs, [lhs](auto const& r){ return lhs >= r; } );
}
template<class T, class D>
bool operator==( boost::numeric::ublas::detail::tensor_expression<T,D> const& lhs, typename T::const_reference rhs) {
return boost::numeric::ublas::detail::compare( lhs, [rhs](auto const& l){ return l == rhs; } );
}
template<class T, class D>
auto operator!=( boost::numeric::ublas::detail::tensor_expression<T,D> const& lhs, typename T::const_reference rhs) {
return boost::numeric::ublas::detail::compare( lhs, [rhs](auto const& l){ return l != rhs; } );
}
template<class T, class D>
auto operator< ( boost::numeric::ublas::detail::tensor_expression<T,D> const& lhs, typename T::const_reference rhs) {
return boost::numeric::ublas::detail::compare( lhs, [rhs](auto const& l){ return l < rhs; } );
}
template<class T, class D>
auto operator<=( boost::numeric::ublas::detail::tensor_expression<T,D> const& lhs, typename T::const_reference rhs) {
return boost::numeric::ublas::detail::compare( lhs, [rhs](auto const& l){ return l <= rhs; } );
}
template<class T, class D>
auto operator> ( boost::numeric::ublas::detail::tensor_expression<T,D> const& lhs, typename T::const_reference rhs) {
return boost::numeric::ublas::detail::compare( lhs, [rhs](auto const& l){ return l > rhs; } );
}
template<class T, class D>
auto operator>=( boost::numeric::ublas::detail::tensor_expression<T,D> const& lhs, typename T::const_reference rhs) {
return boost::numeric::ublas::detail::compare( lhs, [rhs](auto const& l){ return l >= rhs; } );
}
#endif

View File

@@ -0,0 +1,122 @@
//
// Copyright (c) 2018-2019, Cem Bassoy, cem.bassoy@gmail.com
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// The authors gratefully acknowledge the support of
// Fraunhofer IOSB, Ettlingen, Germany
//
#ifndef BOOST_UBLAS_TENSOR_OSTREAM_HPP
#define BOOST_UBLAS_TENSOR_OSTREAM_HPP
#include <ostream>
#include <complex>
namespace boost {
namespace numeric {
namespace ublas {
namespace detail {
/** @brief Writes a single tensor element followed by a trailing space. */
template <class value_type>
void print(std::ostream& out, value_type const& p)
{
	out << p << " ";
}

/** @brief Writes a complex element as "a+bi " or "a-bi ".
 *
 * For a negative imaginary part, operator<< already emits the minus sign,
 * so the explicit '+' is only inserted for non-negative imaginary parts
 * (the unconditional '+' would otherwise produce "a+-bi").
 */
template <class value_type>
void print(std::ostream& out, const std::complex<value_type>& p)
{
	if(std::imag(p) < value_type(0))
		out << std::real(p) << std::imag(p) << "i ";
	else
		out << std::real(p) << "+" << std::imag(p) << "i ";
}
/** @brief Recursively prints a tensor slice in a MATLAB-like format.
 *
 * For r < 2 a single matrix slice (extents n[0] x n[1], strides w[0], w[1])
 * is printed as "[ ... " followed by rows separated by ";" and a closing
 * "]". For larger r the tensor is printed as "cat(r+1, ..., ...)" — a
 * concatenation of its order-r sub-slices along dimension r.
 *
 * @param out output stream to write to
 * @param r zero-based index of the current (outermost) dimension
 * @param p pointer to the first element of the current sub-tensor
 * @param w pointer to the stride array of the tensor
 * @param n pointer to the extents array of the tensor
 */
template <class size_type, class value_type>
void print(std::ostream& out, size_type r, const value_type* p, const size_type* w, const size_type* n)
{
	if(r < 2)
	{
		out << "[ ... " << std::endl;

		for(auto row = 0u; row < n[0]; p += w[0], ++row) // iterate over the first dimension (rows)
		{
			auto p1 = p;
			for(auto col = 0u; col < n[1]; p1 += w[1], ++col) // iterate over the second dimension (columns)
			{
				print(out,*p1);
			}
			// Separate rows with ';' except after the last one.
			if(row < n[0]-1)
				out << "; " << std::endl;
		}
		out << "]";
	}
	else
	{
		out << "cat("<< r+1 <<",..." << std::endl;
		// All but the last sub-slice along dimension r, each followed by ",...".
		for(auto d = 0u; d < n[r]-1; p += w[r], ++d){
			print(out, r-1, p, w, n);
			out << ",..." << std::endl;
		}
		print(out, r-1, p, w, n);
	}
	// Close the cat(...) opened in the recursive branch.
	if(r>1)
		out << ")";
}
////////////////////////////
}
}
}
}
namespace boost {
namespace numeric {
namespace ublas {
template<class T, class F, class A>
class tensor;
template<class T, class F, class A>
class matrix;
template<class T, class A>
class vector;
}
}
}
/** @brief Writes a tensor to an output stream.
 *
 * Scalars print as "[x]"; vectors as "[x1, x2, ...]" — elements of a column
 * vector are separated by ';', of a row vector by ','. Higher-order tensors
 * are delegated to the recursive detail::print, which emits a MATLAB-like
 * cat(...) representation.
 */
template <class V, class F, class A>
std::ostream& operator << (std::ostream& out, boost::numeric::ublas::tensor<V,F,A> const& t)
{
	if(t.extents().is_scalar()){
		out << '[';
		boost::numeric::ublas::detail::print(out,t[0]);
		out << ']';
	}
	else if(t.extents().is_vector()) {
		// Column vectors (extent(0) > extent(1)) use ';', row vectors ','.
		// Plain char value — no need to bind a reference to the ternary's temporary.
		const char cat = t.extents().at(0) > t.extents().at(1) ? ';' : ',';
		out << '[';
		// NOTE(review): assumes a vector-shaped tensor has size() >= 1 — confirm.
		for(auto i = 0u; i < t.size()-1; ++i){
			boost::numeric::ublas::detail::print(out,t[i]);
			out << cat << ' ';
		}
		boost::numeric::ublas::detail::print(out,t[t.size()-1]);
		out << ']';
	}
	else{
		boost::numeric::ublas::detail::print(out, t.rank()-1, t.data(), t.strides().data(), t.extents().data());
	}
	return out;
}
#endif

View File

@@ -0,0 +1,84 @@
//
// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// The authors gratefully acknowledge the support of
// Fraunhofer IOSB, Ettlingen Germany
//
#ifndef _BOOST_STORAGE_TRAITS_HPP_
#define _BOOST_STORAGE_TRAITS_HPP_
#include <vector>
#include <array>
namespace boost {
namespace numeric {
namespace ublas {
// Primary template: maps a storage container type A onto the nested type
// aliases the tensor implementation needs. Only specializations are defined.
template <class A>
struct storage_traits;

// Specialization for dynamically sized storage backed by std::vector.
template <class V, class A>
struct storage_traits<std::vector<V,A>>
{
	using array_type             = std::vector<V,A>;

	using size_type              = typename array_type::size_type;
	using difference_type        = typename array_type::difference_type;
	using value_type             = typename array_type::value_type;

	using reference              = typename array_type::reference;
	using const_reference        = typename array_type::const_reference;

	using pointer                = typename array_type::pointer;
	using const_pointer          = typename array_type::const_pointer;

	using iterator               = typename array_type::iterator;
	using const_iterator         = typename array_type::const_iterator;

	using reverse_iterator       = typename array_type::reverse_iterator;
	using const_reverse_iterator = typename array_type::const_reverse_iterator;

	// Same storage kind holding elements of type U; the allocator is rebound too.
	template<class U>
	using rebind = std::vector<U, typename std::allocator_traits<A>::template rebind_alloc<U>>;
};

// Specialization for statically sized storage backed by std::array.
template <class V, std::size_t N>
struct storage_traits<std::array<V,N>>
{
	using array_type             = std::array<V,N>;

	using size_type              = typename array_type::size_type;
	using difference_type        = typename array_type::difference_type;
	using value_type             = typename array_type::value_type;

	using reference              = typename array_type::reference;
	using const_reference        = typename array_type::const_reference;

	using pointer                = typename array_type::pointer;
	using const_pointer          = typename array_type::const_pointer;

	using iterator               = typename array_type::iterator;
	using const_iterator         = typename array_type::const_iterator;

	using reverse_iterator       = typename array_type::reverse_iterator;
	using const_reverse_iterator = typename array_type::const_reverse_iterator;

	// Same storage kind (and fixed size N) holding elements of type U.
	template<class U>
	using rebind = std::array<U,N>;
};
} // ublas
} // numeric
} // boost
#endif // _BOOST_STORAGE_TRAITS_HPP_

View File

@@ -0,0 +1,251 @@
//
// Copyright (c) 2018-2019, Cem Bassoy, cem.bassoy@gmail.com
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// The authors gratefully acknowledge the support of
// Fraunhofer IOSB, Ettlingen, Germany
//
/// \file strides.hpp Definition for the basic_strides template class
#ifndef BOOST_UBLAS_TENSOR_STRIDES_HPP
#define BOOST_UBLAS_TENSOR_STRIDES_HPP
#include <vector>
#include <limits>
#include <numeric>
#include <stdexcept>
#include <initializer_list>
#include <algorithm>
#include <cassert>
#include <boost/numeric/ublas/functional.hpp>
namespace boost {
namespace numeric {
namespace ublas {
using first_order = column_major;
using last_order = row_major;
template<class T>
class basic_extents;
/** @brief Template class for storing tensor strides for iteration with runtime variable size.
*
* Proxy template class of std::vector<int_type>.
*
*/
template<class __int_type, class __layout>
class basic_strides
{
public:
	using base_type = std::vector<__int_type>;

	// Strides address linear memory, so the element type must be an
	// unsigned integer; the layout must be one of the two supported orders.
	static_assert( std::numeric_limits<typename base_type::value_type>::is_integer,
		"Static error in boost::numeric::ublas::basic_strides: type must be of type integer.");
	static_assert(!std::numeric_limits<typename base_type::value_type>::is_signed,
		"Static error in boost::numeric::ublas::basic_strides: type must be of type unsigned integer.");
	static_assert(std::is_same<__layout,first_order>::value || std::is_same<__layout,last_order>::value,
		"Static error in boost::numeric::ublas::basic_strides: layout type must either first or last order");

	using layout_type     = __layout;
	using value_type      = typename base_type::value_type;
	using reference       = typename base_type::reference;
	using const_reference = typename base_type::const_reference;
	using size_type       = typename base_type::size_type;
	using const_pointer   = typename base_type::const_pointer;
	using const_iterator  = typename base_type::const_iterator;

	/** @brief Default constructs basic_strides
	 *
	 * @code auto ex = basic_strides<unsigned>{};
	 */
	constexpr explicit basic_strides()
		: _base{}
	{
	}

	/** @brief Constructs basic_strides from basic_extents for the first- and last-order storage formats
	 *
	 * @code auto strides = basic_strides<unsigned>( basic_extents<std::size_t>{2,3,4} );
	 *
	 */
	template <class T>
	basic_strides(basic_extents<T> const& s)
		: _base(s.size(),1)
	{
		// Empty shape: the stride vector is empty as well.
		if(s.empty())
			return;

		if(!s.valid())
			throw std::runtime_error("Error in boost::numeric::ublas::basic_strides() : shape is not valid.");

		// Scalars and vectors keep all strides equal to 1.
		if(s.is_vector() || s.is_scalar())
			return;

		if(this->size() < 2)
			throw std::runtime_error("Error in boost::numeric::ublas::basic_strides() : size of strides must be greater or equal 2.");

		if constexpr (std::is_same<layout_type,first_order>::value){
			// Column-major: stride of dimension k is the product of all
			// preceding extents (stride of dimension 0 stays 1).
			size_type k = 1ul, kend = this->size();
			for(; k < kend; ++k)
				_base[k] = _base[k-1] * s[k-1];
		}
		else {
			// Row-major: stride of dimension k is the product of all
			// following extents. k is unsigned, so dimension 0 is written
			// after the loop instead of letting k wrap around.
			size_type k = this->size()-2, kend = 0ul;
			for(; k > kend; --k)
				_base[k] = _base[k+1] * s[k+1];
			_base[0] = _base[1] * s[1];
		}
	}

	basic_strides(basic_strides const& l)
		: _base(l._base)
	{}

	basic_strides(basic_strides && l )
		: _base(std::move(l._base))
	{}

	basic_strides(base_type const& l )
		: _base(l)
	{}

	basic_strides(base_type && l )
		: _base(std::move(l))
	{}

	~basic_strides() = default;

	// Copy-and-swap: the by-value parameter covers both copy- and move-assignment.
	basic_strides& operator=(basic_strides other)
	{
		swap (*this, other);
		return *this;
	}

	friend void swap(basic_strides& lhs, basic_strides& rhs) {
		std::swap(lhs._base   , rhs._base);
	}

	// Unchecked element access.
	const_reference operator[] (size_type p) const{
		return _base[p];
	}

	const_pointer data() const{
		return _base.data();
	}

	// Bounds-checked element access (throws std::out_of_range).
	const_reference at (size_type p) const{
		return _base.at(p);
	}

	bool empty() const{
		return _base.empty();
	}

	size_type size() const{
		return _base.size();
	}

	// Cross-layout comparisons compare the raw stride values only.
	template<class other_layout>
	bool operator == (basic_strides<value_type, other_layout> const& b) const{
		return b.base() == this->base();
	}

	template<class other_layout>
	bool operator != (basic_strides<value_type, other_layout> const& b) const{
		return b.base() != this->base();
	}

	bool operator == (basic_strides const& b) const{
		return b._base == _base;
	}

	bool operator != (basic_strides const& b) const{
		return b._base != _base;
	}

	const_iterator begin() const{
		return _base.begin();
	}

	const_iterator end() const{
		return _base.end();
	}

	void clear() {
		this->_base.clear();
	}

	// Read-only access to the underlying stride vector.
	base_type const& base() const{
		return this->_base;
	}

protected:
	base_type _base;  // stride values, one per tensor dimension
};
template<class layout_type>
using strides = basic_strides<std::size_t, layout_type>;
namespace detail {
/** @brief Returns relative memory index with respect to a multi-index
 *
 * Computes the dot product of the multi-index with the stride vector.
 *
 * @code auto j = access(std::vector{3,4,5}, strides{shape{4,2,3},first_order}); @endcode
 *
 * @param[in] i multi-index of length p
 * @param[in] w stride vector of length p
 * @returns relative memory location depending on \c i and \c w
 */
template<class size_type, class layout_type>
BOOST_UBLAS_INLINE
auto access(std::vector<size_type> const& i, basic_strides<size_type,layout_type> const& w)
{
	// The inline specifier (BOOST_UBLAS_INLINE) must follow the template
	// parameter clause; placing it before 'template' is ill-formed C++.
	const auto p = i.size();
	size_type sum = 0u;
	for(auto r = 0u; r < p; ++r)
		sum += i[r]*w[r];
	return sum;
}
/** @brief Returns relative memory index with respect to a partial multi-index
 *
 * Recursively accumulates i*w[r] over the elements of the parameter pack.
 *
 * @code auto j = access(0, strides{shape{4,2,3},first_order}, 2,3,4); @endcode
 *
 * @tparam r zero-based position of \c i within the multi-index
 * @param[in] sum the current relative memory index
 * @param[in] w stride vector
 * @param[in] i current element of the partial multi-index
 * @param[in] is the following elements of the partial multi-index
 * @returns relative memory location depending on \c i and \c w
 */
template<std::size_t r, class layout_type, class ... size_types>
BOOST_UBLAS_INLINE
auto access(std::size_t sum, basic_strides<std::size_t, layout_type> const& w, std::size_t i, size_types ... is)
{
	// The inline specifier (BOOST_UBLAS_INLINE) must follow the template
	// parameter clause; placing it before 'template' is ill-formed C++.
	sum += i*w[r];
	if constexpr (sizeof...(is) == 0)
		return sum;
	else
		return detail::access<r+1>(sum,w,std::forward<size_types>(is)...);
}
}
}
}
}
#endif

View File

@@ -0,0 +1,734 @@
// Copyright (c) 2018-2019
// Cem Bassoy
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// The authors gratefully acknowledge the support of
// Fraunhofer and Google in producing this work
// which started as a Google Summer of Code project.
//
/// \file tensor.hpp Definition for the tensor template class
#ifndef BOOST_UBLAS_TENSOR_IMPL_HPP
#define BOOST_UBLAS_TENSOR_IMPL_HPP
#include <boost/config.hpp>
#include <initializer_list>
#include "algorithms.hpp"
#include "expression.hpp"
#include "expression_evaluation.hpp"
#include "extents.hpp"
#include "strides.hpp"
#include "index.hpp"
namespace boost { namespace numeric { namespace ublas {
template<class T, class F, class A>
class tensor;
template<class T, class F, class A>
class matrix;
template<class T, class A>
class vector;
///** \brief Base class for Tensor container models
// *
// * it does not model the Tensor concept but all derived types should.
// * The class defines a common base type and some common interface for all
// * statically derived Tensor classes
// * We implement the casts to the statically derived type.
// */
//template<class C>
//class tensor_container:
// public detail::tensor_expression<C>
//{
//public:
// static const unsigned complexity = 0;
// typedef C container_type;
// typedef tensor_tag type_category;
// BOOST_UBLAS_INLINE
// const container_type &operator () () const {
// return *static_cast<const container_type *> (this);
// }
// BOOST_UBLAS_INLINE
// container_type &operator () () {
// return *static_cast<container_type *> (this);
// }
//};
/** @brief A dense tensor of values of type \c T.
 *
 * For a \f$n\f$-dimensional tensor \f$v\f$ and \f$0\leq i < n\f$ every element \f$v_i\f$ is mapped
 * to the \f$i\f$-th element of the container. A storage type \c A can be specified which defaults to \c unbounded_array.
 * Elements are constructed by \c A, which need not initialise their value.
 *
 * @tparam T type of the objects stored in the tensor (like int, double, complex,...)
 * @tparam F storage format of the tensor; must be either \c first_order or \c last_order
 * @tparam A The type of the storage array of the tensor. Default is \c unbounded_array<T>. \c <bounded_array<T> and \c std::vector<T> can also be used
 */
template<class T, class F = first_order, class A = std::vector<T,std::allocator<T>> >
class tensor:
	public detail::tensor_expression<tensor<T, F, A>,tensor<T, F, A>>
{
	// Only the two canonical storage orders are supported; reject anything else at compile time.
	static_assert( std::is_same<F,first_order>::value ||
				   std::is_same<F,last_order >::value, "boost::numeric::tensor template class only supports first- or last-order storage formats.");

	using self_type = tensor<T, F, A>;
public:

	template<class derived_type>
	using tensor_expression_type = detail::tensor_expression<self_type,derived_type>;

	template<class derived_type>
	using matrix_expression_type = matrix_expression<derived_type>;

	template<class derived_type>
	using vector_expression_type = vector_expression<derived_type>;

	using super_type = tensor_expression_type<self_type>;

	// static_assert(std::is_same_v<tensor_expression_type<self_type>, detail::tensor_expression<tensor<T,F,A>,tensor<T,F,A>>>, "tensor_expression_type<self_type>");

	using array_type  = A;
	using layout_type = F;

	using size_type       = typename array_type::size_type;
	using difference_type = typename array_type::difference_type;
	using value_type      = typename array_type::value_type;

	using reference       = typename array_type::reference;
	using const_reference = typename array_type::const_reference;

	using pointer       = typename array_type::pointer;
	using const_pointer = typename array_type::const_pointer;

	using iterator       = typename array_type::iterator;
	using const_iterator = typename array_type::const_iterator;

	using reverse_iterator       = typename array_type::reverse_iterator;
	using const_reverse_iterator = typename array_type::const_reverse_iterator;

	using tensor_temporary_type = self_type;
	using storage_category      = dense_tag;

	using strides_type = basic_strides<std::size_t,layout_type>;
	using extents_type = shape;

	using matrix_type = matrix<value_type,layout_type,array_type>;
	using vector_type = vector<value_type,array_type>;

	/** @brief Constructs a tensor.
	 *
	 * @note the tensor is empty.
	 * @note the tensor needs to reshaped for further use.
	 *
	 */
	BOOST_UBLAS_INLINE
	constexpr tensor ()
		: tensor_expression_type<self_type>() // container_type
		, extents_()
		, strides_()
		, data_()
	{
	}

	/** @brief Constructs a tensor with an initializer list
	 *
	 * By default, its elements are initialized to 0.
	 *
	 * @code tensor<float> A{4,2,3}; @endcode
	 *
	 * @param l initializer list for setting the dimension extents of the tensor
	 */
	explicit BOOST_UBLAS_INLINE
	tensor (std::initializer_list<size_type> l)
		: tensor_expression_type<self_type>()
		, extents_ (std::move(l))
		, strides_ (extents_)
		, data_ (extents_.product())
	{
	}

	/** @brief Constructs a tensor with a \c shape
	 *
	 * By default, its elements are initialized to 0.
	 *
	 * @code tensor<float> A{extents{4,2,3}}; @endcode
	 *
	 * @param s initial tensor dimension extents
	 */
	explicit BOOST_UBLAS_INLINE
	tensor (extents_type const& s)
		: tensor_expression_type<self_type>() //tensor_container<self_type>()
		, extents_ (s)
		, strides_ (extents_)
		, data_ (extents_.product())
	{}

	/** @brief Constructs a tensor with a \c shape and initiates it with one-dimensional data
	 *
	 * @code tensor<float> A{extents{4,2,3}, array }; @endcode
	 *
	 * @param s initial tensor dimension extents
	 * @param a container of \c array_type that is copied according to the storage layout
	 * @throws std::runtime_error if the container size does not equal the product of the extents
	 */
	BOOST_UBLAS_INLINE
	tensor (extents_type const& s, const array_type &a)
		: tensor_expression_type<self_type>() //tensor_container<self_type>()
		, extents_ (s)
		, strides_ (extents_)
		, data_ (a)
	{
		if(this->extents_.product() != this->data_.size())
			throw std::runtime_error("Error in boost::numeric::ublas::tensor: size of provided data and specified extents do not match.");
	}

	/** @brief Constructs a tensor using a shape tuple and initiates it with a value.
	 *
	 * @code tensor<float> A{extents{4,2,3}, 1 }; @endcode
	 *
	 * @param e initial tensor dimension extents
	 * @param i initial value of all elements of type \c value_type
	 */
	BOOST_UBLAS_INLINE
	tensor (extents_type const& e, const value_type &i)
		: tensor_expression_type<self_type>() //tensor_container<self_type> ()
		, extents_ (e)
		, strides_ (extents_)
		, data_ (extents_.product(), i)
	{}

	/** @brief Constructs a tensor from another tensor
	 *
	 * @param v tensor to be copied.
	 */
	BOOST_UBLAS_INLINE
	tensor (const tensor &v)
		: tensor_expression_type<self_type>()
		, extents_ (v.extents_)
		, strides_ (v.strides_)
		, data_ (v.data_ )
	{}

	/** @brief Constructs a tensor from another tensor
	 *
	 * @param v tensor to be moved.
	 */
	// NOTE(review): the move constructor is not declared noexcept, so standard
	// containers may fall back to copying on reallocation — confirm intent.
	BOOST_UBLAS_INLINE
	tensor (tensor &&v)
		: tensor_expression_type<self_type>() //tensor_container<self_type> ()
		, extents_ (std::move(v.extents_))
		, strides_ (std::move(v.strides_))
		, data_ (std::move(v.data_ ))
	{}

	/** @brief Constructs a tensor with a matrix
	 *
	 * \note Initially the tensor will be two-dimensional.
	 *
	 * @param v matrix to be copied.
	 */
	BOOST_UBLAS_INLINE
	tensor (const matrix_type &v)
		: tensor_expression_type<self_type>()
		, extents_ ()
		, strides_ ()
		, data_ (v.data())
	{
		// Extents/strides are only set up for a non-empty matrix; an empty
		// matrix yields an empty (rank-0) tensor.
		if(!data_.empty()){
			extents_ = extents_type{v.size1(),v.size2()};
			strides_ = strides_type(extents_);
		}
	}

	/** @brief Constructs a tensor with a matrix
	 *
	 * \note Initially the tensor will be two-dimensional.
	 *
	 * @param v matrix to be moved.
	 */
	BOOST_UBLAS_INLINE
	tensor (matrix_type &&v)
		: tensor_expression_type<self_type>()
		, extents_ {}
		, strides_ {}
		, data_ {}
	{
		// Only adopt the matrix storage if it is non-empty.
		if(v.size1()*v.size2() != 0){
			extents_ = extents_type{v.size1(),v.size2()};
			strides_ = strides_type(extents_);
			data_ = std::move(v.data());
		}
	}

	/** @brief Constructs a tensor using a \c vector
	 *
	 * @note It is assumed that vector is column vector
	 * @note Initially the tensor will be one-dimensional.
	 *
	 * @param v vector to be copied.
	 */
	BOOST_UBLAS_INLINE
	tensor (const vector_type &v)
		: tensor_expression_type<self_type>()
		, extents_ ()
		, strides_ ()
		, data_ (v.data())
	{
		// A non-empty vector becomes an n x 1 tensor; an empty vector yields
		// an empty tensor.
		if(!data_.empty()){
			extents_ = extents_type{data_.size(),1};
			strides_ = strides_type(extents_);
		}
	}

	/** @brief Constructs a tensor using a \c vector
	 *
	 * @param v vector to be moved.
	 */
	BOOST_UBLAS_INLINE
	tensor (vector_type &&v)
		: tensor_expression_type<self_type>()
		, extents_ {}
		, strides_ {}
		, data_ {}
	{
		if(v.size() != 0){
			extents_ = extents_type{v.size(),1};
			strides_ = strides_type(extents_);
			data_ = std::move(v.data());
		}
	}

	/** @brief Constructs a tensor with another tensor with a different layout
	 *
	 * The element order is converted via \c copy so that both tensors address
	 * the same logical elements despite differing strides.
	 *
	 * @param other tensor with a different layout to be copied.
	 */
	// NOTE(review): BOOST_UBLAS_INLINE precedes the template-parameter-list
	// here (and in the expression constructors/assignment below); if the macro
	// expands to `inline`, this ordering is non-standard — confirm it compiles
	// on all supported toolchains.
	BOOST_UBLAS_INLINE
	template<class other_layout>
	tensor (const tensor<value_type, other_layout> &other)
		: tensor_expression_type<self_type> ()
		, extents_ (other.extents())
		, strides_ (other.extents())
		, data_ (other.extents().product())
	{
		copy(this->rank(), this->extents().data(),
			 this->data(), this->strides().data(),
			 other.data(), other.strides().data());
	}

	/** @brief Constructs a tensor with an tensor expression
	 *
	 * @code tensor<float> A = B + 3 * C; @endcode
	 *
	 * @note type must be specified of tensor must be specified.
	 * @note dimension extents are extracted from tensors within the expression.
	 *
	 * @param expr tensor expression
	 */
	BOOST_UBLAS_INLINE
	template<class derived_type>
	tensor (const tensor_expression_type<derived_type> &expr)
		: tensor_expression_type<self_type> ()
		, extents_ ( detail::retrieve_extents(expr) )
		, strides_ ( extents_ )
		, data_ ( extents_.product() )
	{
		// Without at least one tensor operand the shape cannot be deduced.
		static_assert( detail::has_tensor_types<self_type, tensor_expression_type<derived_type>>::value,
					   "Error in boost::numeric::ublas::tensor: expression does not contain a tensor. cannot retrieve shape.");
		detail::eval( *this, expr );
	}

	/** @brief Constructs a tensor with a matrix expression
	 *
	 * @code tensor<float> A = B + 3 * C; @endcode
	 *
	 * @note matrix expression is evaluated and pushed into a temporary matrix before assignment.
	 * @note extents are automatically extracted from the temporary matrix
	 *
	 * @param expr matrix expression
	 */
	BOOST_UBLAS_INLINE
	template<class derived_type>
	tensor (const matrix_expression_type<derived_type> &expr)
		: tensor( matrix_type ( expr ) )
	{
	}

	/** @brief Constructs a tensor with a vector expression
	 *
	 * @code tensor<float> A = b + 3 * b; @endcode
	 *
	 * @note matrix expression is evaluated and pushed into a temporary matrix before assignment.
	 * @note extents are automatically extracted from the temporary matrix
	 *
	 * @param expr vector expression
	 */
	BOOST_UBLAS_INLINE
	template<class derived_type>
	tensor (const vector_expression_type<derived_type> &expr)
		: tensor( vector_type ( expr ) )
	{
	}

	/** @brief Evaluates the tensor_expression and assigns the results to the tensor
	 *
	 * @code A = B + C * 2; @endcode
	 *
	 * @note rank and dimension extents of the tensors in the expressions must conform with this tensor.
	 *
	 * @param expr expression that is evaluated.
	 */
	BOOST_UBLAS_INLINE
	template<class derived_type>
	tensor &operator = (const tensor_expression_type<derived_type> &expr)
	{
		detail::eval(*this, expr);
		return *this;
	}

	/** @brief Assigns another tensor using the copy-and-swap idiom.
	 *
	 * @param other tensor taken by value (copied or moved at the call site).
	 */
	tensor& operator=(tensor other)
	{
		swap (*this, other);
		return *this;
	}

	/** @brief Assigns the value \c v to every element of the tensor. */
	tensor& operator=(const_reference v)
	{
		std::fill(this->begin(), this->end(), v);
		return *this;
	}

	/** @brief Returns true if the tensor is empty (\c size==0) */
	BOOST_UBLAS_INLINE
	bool empty () const {
		return this->data_.empty();
	}

	/** @brief Returns the size of the tensor */
	BOOST_UBLAS_INLINE
	size_type size () const {
		return this->data_.size ();
	}

	/** @brief Returns the extent of dimension/mode \c r of the tensor
	 *
	 * @param r zero-based dimension index where 0 <= r < this->rank()
	 */
	BOOST_UBLAS_INLINE
	size_type size (size_type r) const {
		return this->extents_.at(r);
	}

	/** @brief Returns the number of dimensions/modes of the tensor */
	BOOST_UBLAS_INLINE
	size_type rank () const {
		return this->extents_.size();
	}

	/** @brief Returns the number of dimensions/modes of the tensor */
	BOOST_UBLAS_INLINE
	size_type order () const {
		return this->extents_.size();
	}

	/** @brief Returns the strides of the tensor */
	BOOST_UBLAS_INLINE
	strides_type const& strides () const {
		return this->strides_;
	}

	/** @brief Returns the extents of the tensor */
	BOOST_UBLAS_INLINE
	extents_type const& extents () const {
		return this->extents_;
	}

	/** @brief Returns a \c const pointer to the first element of the underlying storage. */
	BOOST_UBLAS_INLINE
	const_pointer data () const {
		return this->data_.data();
	}

	/** @brief Returns a pointer to the first element of the underlying storage. */
	BOOST_UBLAS_INLINE
	pointer data () {
		return this->data_.data();
	}

	/** @brief Element access using a single index.
	 *
	 * @code auto a = A[i]; @endcode
	 *
	 * @param i zero-based index where 0 <= i < this->size()
	 */
	BOOST_UBLAS_INLINE
	const_reference operator [] (size_type i) const {
		return this->data_[i];
	}

	/** @brief Element access using a single index.
	 *
	 *
	 * @code A[i] = a; @endcode
	 *
	 * @param i zero-based index where 0 <= i < this->size()
	 */
	BOOST_UBLAS_INLINE
	reference operator [] (size_type i)
	{
		return this->data_[i];
	}

	/** @brief Element access using a multi-index or single-index.
	 *
	 *
	 * @code auto a = A.at(i,j,k); @endcode or
	 * @code auto a = A.at(i); @endcode
	 *
	 * @param i zero-based index where 0 <= i < this->size() if sizeof...(is) == 0, else 0<= i < this->size(0)
	 * @param is zero-based indices where 0 <= is[r] < this->size(r) where 0 < r < this->rank()
	 */
	template<class ... size_types>
	BOOST_UBLAS_INLINE
	const_reference at (size_type i, size_types ... is) const {
		// Single index: direct linear access. Multi-index: fold into a linear
		// offset via the strides (detail::access).
		if constexpr (sizeof...(is) == 0)
			return this->data_[i];
		else
			return this->data_[detail::access<0ul>(size_type(0),this->strides_,i,std::forward<size_types>(is)...)];
	}

	/** @brief Element access using a multi-index or single-index.
	 *
	 *
	 * @code A.at(i,j,k) = a; @endcode or
	 * @code A.at(i) = a; @endcode
	 *
	 * @param i zero-based index where 0 <= i < this->size() if sizeof...(is) == 0, else 0<= i < this->size(0)
	 * @param is zero-based indices where 0 <= is[r] < this->size(r) where 0 < r < this->rank()
	 */
	BOOST_UBLAS_INLINE
	template<class ... size_types>
	reference at (size_type i, size_types ... is) {
		if constexpr (sizeof...(is) == 0)
			return this->data_[i];
		else
			return this->data_[detail::access<0ul>(size_type(0),this->strides_,i,std::forward<size_types>(is)...)];
	}

	/** @brief Element access using a single index.
	 *
	 *
	 * @code A(i) = a; @endcode
	 *
	 * @param i zero-based index where 0 <= i < this->size()
	 */
	BOOST_UBLAS_INLINE
	const_reference operator()(size_type i) const {
		return this->data_[i];
	}

	/** @brief Element access using a single index.
	 *
	 * @code A(i) = a; @endcode
	 *
	 * @param i zero-based index where 0 <= i < this->size()
	 */
	BOOST_UBLAS_INLINE
	reference operator()(size_type i){
		return this->data_[i];
	}

	/** @brief Generates a tensor index for tensor contraction
	 *
	 *
	 * @code auto Ai = A(_i,_j,k); @endcode
	 *
	 * @param p placeholder
	 * @param ps zero-based indices where 0 <= is[r] < this->size(r) where 0 < r < this->rank()
	 * @throws std::runtime_error if the number of placeholders does not equal the rank
	 * @return a pair of a const-reference to this tensor and the placeholder tuple
	 */
	BOOST_UBLAS_INLINE
	template<std::size_t I, class ... index_types>
	decltype(auto) operator() (index::index_type<I> p, index_types ... ps) const
	{
		constexpr auto N = sizeof...(ps)+1;
		if( N != this->rank() )
			throw std::runtime_error("Error in boost::numeric::ublas::operator(): size of provided index_types does not match with the rank.");
		return std::make_pair( std::cref(*this), std::make_tuple( p, std::forward<index_types>(ps)... ) );
	}

	/** @brief Reshapes the tensor
	 *
	 *
	 * (1) @code A.reshape(extents{m,n,o}); @endcode or
	 * (2) @code A.reshape(extents{m,n,o},4); @endcode
	 *
	 * If the size of this smaller than the specified extents than
	 * default constructed (1) or specified (2) value is appended.
	 *
	 * @note rank of the tensor might also change.
	 *
	 * @param e extents with which the tensor is reshaped.
	 * @param v value which is appended if the tensor is enlarged.
	 */
	BOOST_UBLAS_INLINE
	void reshape (extents_type const& e, value_type v = value_type{})
	{
		this->extents_ = e;
		this->strides_ = strides_type(this->extents_);
		// Resize only when the total number of elements actually changes.
		if(e.product() != this->size())
			this->data_.resize (this->extents_.product(), v);
	}

	/** @brief Swaps extents, strides and storage of two tensors. */
	friend void swap(tensor& lhs, tensor& rhs) {
		std::swap(lhs.data_ , rhs.data_ );
		std::swap(lhs.extents_, rhs.extents_);
		std::swap(lhs.strides_, rhs.strides_);
	}

	/// \brief return an iterator on the first element of the tensor
	BOOST_UBLAS_INLINE
	const_iterator begin () const {
		return data_.begin ();
	}

	/// \brief return an iterator on the first element of the tensor
	BOOST_UBLAS_INLINE
	const_iterator cbegin () const {
		return data_.cbegin ();
	}

	/// \brief return an iterator after the last element of the tensor
	BOOST_UBLAS_INLINE
	const_iterator end () const {
		return data_.end();
	}

	/// \brief return an iterator after the last element of the tensor
	BOOST_UBLAS_INLINE
	const_iterator cend () const {
		return data_.cend ();
	}

	/// \brief Return an iterator on the first element of the tensor
	BOOST_UBLAS_INLINE
	iterator begin () {
		return data_.begin();
	}

	/// \brief Return an iterator at the end of the tensor
	BOOST_UBLAS_INLINE
	iterator end () {
		return data_.end();
	}

	/// \brief Return a const reverse iterator before the first element of the reversed tensor (i.e. end() of normal tensor)
	BOOST_UBLAS_INLINE
	const_reverse_iterator rbegin () const {
		return data_.rbegin();
	}

	/// \brief Return a const reverse iterator before the first element of the reversed tensor (i.e. end() of normal tensor)
	BOOST_UBLAS_INLINE
	const_reverse_iterator crbegin () const {
		return data_.crbegin();
	}

	/// \brief Return a const reverse iterator on the end of the reverse tensor (i.e. first element of the normal tensor)
	BOOST_UBLAS_INLINE
	const_reverse_iterator rend () const {
		return data_.rend();
	}

	/// \brief Return a const reverse iterator on the end of the reverse tensor (i.e. first element of the normal tensor)
	BOOST_UBLAS_INLINE
	const_reverse_iterator crend () const {
		return data_.crend();
	}

	/// \brief Return a const reverse iterator before the first element of the reversed tensor (i.e. end() of normal tensor)
	BOOST_UBLAS_INLINE
	reverse_iterator rbegin () {
		return data_.rbegin();
	}

	/// \brief Return a const reverse iterator on the end of the reverse tensor (i.e. first element of the normal tensor)
	BOOST_UBLAS_INLINE
	reverse_iterator rend () {
		return data_.rend();
	}

#if 0
	// -------------
	// Serialization
	// -------------

	/// Serialize a tensor into and archive as defined in Boost
	/// \param ar Archive object. Can be a flat file, an XML file or any other stream
	/// \param file_version Optional file version (not yet used)
	template<class Archive>
	void serialize(Archive & ar, const unsigned int /* file_version */){
		ar & serialization::make_nvp("data",data_);
	}
#endif

private:
	extents_type extents_;  // dimension extents of the tensor (one entry per mode)
	strides_type strides_;  // strides derived from extents_ according to layout_type
	array_type data_;       // contiguous element storage of size extents_.product()
};
}}} // namespaces
#endif