Update Eigen to commit: 21e89b930c6af56dbdaeea2a91d8b9d6fd2c208a
CHANGELOG
=========
21e89b930 - Enable default behavior for pmin<PropagateFast>, predux_min, etc.
4fdf87bbf - clean up intel packet reductions
a7f183cad - Add factory/getters for quat coeffs in both orders
d81aa18f4 - Explicitly construct the scalar for non-implicitly convertible types
PiperOrigin-RevId: 766811637
Change-Id: I6184e3c71bb418f61715e8c3d5fb29d357749417
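
Note (not part of the patch): the first changelog entry means that for integer packets every NaN-propagation mode now routes through the fast path, since integers carry no NaNs. A minimal sketch of the resulting behavior, assuming an x86 build of this Eigen revision with SSE enabled:

#include <Eigen/Core>
#include <iostream>

int main() {
  using namespace Eigen::internal;
  Packet4i a = pset1<Packet4i>(3), b = pset1<Packet4i>(7);
  // With pminmax_impl<NaNPropagation, IsInteger> (see the GenericPacketMath.h
  // hunk below), all three modes resolve to the same integer min:
  std::cout << pfirst(pmin<Eigen::PropagateFast>(a, b)) << "\n";     // 3
  std::cout << pfirst(pmin<Eigen::PropagateNaN>(a, b)) << "\n";      // 3
  std::cout << pfirst(pmin<Eigen::PropagateNumbers>(a, b)) << "\n";  // 3
  return 0;
}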
diff --git a/Eigen/Core b/Eigen/Core
index 6ae069a..8944d54 100644
--- a/Eigen/Core
+++ b/Eigen/Core
@@ -192,45 +192,38 @@
#include "src/Core/arch/Default/BFloat16.h"
#include "src/Core/arch/Default/GenericPacketMathFunctionsFwd.h"
-#if defined EIGEN_VECTORIZE_AVX512
+#if defined EIGEN_VECTORIZE_SSE
#include "src/Core/arch/SSE/PacketMath.h"
+#include "src/Core/arch/SSE/Reductions.h"
+#include "src/Core/arch/SSE/Complex.h"
+#include "src/Core/arch/SSE/TypeCasting.h"
+#include "src/Core/arch/SSE/MathFunctions.h"
+#endif
+
+#if defined EIGEN_VECTORIZE_AVX
#include "src/Core/arch/AVX/PacketMath.h"
+#include "src/Core/arch/AVX/Reductions.h"
+#include "src/Core/arch/AVX/Complex.h"
+#include "src/Core/arch/AVX/TypeCasting.h"
+#include "src/Core/arch/AVX/MathFunctions.h"
+#endif
+
+#if defined EIGEN_VECTORIZE_AVX512
#include "src/Core/arch/AVX512/PacketMath.h"
+#include "src/Core/arch/AVX512/Reductions.h"
+#include "src/Core/arch/AVX512/Complex.h"
+#include "src/Core/arch/AVX512/TypeCasting.h"
+#include "src/Core/arch/AVX512/MathFunctions.h"
+#include "src/Core/arch/AVX512/TrsmKernel.h"
+#endif
+
#if defined EIGEN_VECTORIZE_AVX512FP16
#include "src/Core/arch/AVX512/PacketMathFP16.h"
-#endif
-#include "src/Core/arch/SSE/TypeCasting.h"
-#include "src/Core/arch/AVX/TypeCasting.h"
-#include "src/Core/arch/AVX512/TypeCasting.h"
-#if defined EIGEN_VECTORIZE_AVX512FP16
#include "src/Core/arch/AVX512/TypeCastingFP16.h"
-#endif
-#include "src/Core/arch/SSE/Complex.h"
-#include "src/Core/arch/AVX/Complex.h"
-#include "src/Core/arch/AVX512/Complex.h"
-#include "src/Core/arch/SSE/MathFunctions.h"
-#include "src/Core/arch/AVX/MathFunctions.h"
-#include "src/Core/arch/AVX512/MathFunctions.h"
-#if defined EIGEN_VECTORIZE_AVX512FP16
#include "src/Core/arch/AVX512/MathFunctionsFP16.h"
#endif
-#include "src/Core/arch/AVX512/TrsmKernel.h"
-#elif defined EIGEN_VECTORIZE_AVX
- // Use AVX for floats and doubles, SSE for integers
-#include "src/Core/arch/SSE/PacketMath.h"
-#include "src/Core/arch/SSE/TypeCasting.h"
-#include "src/Core/arch/SSE/Complex.h"
-#include "src/Core/arch/AVX/PacketMath.h"
-#include "src/Core/arch/AVX/TypeCasting.h"
-#include "src/Core/arch/AVX/Complex.h"
-#include "src/Core/arch/SSE/MathFunctions.h"
-#include "src/Core/arch/AVX/MathFunctions.h"
-#elif defined EIGEN_VECTORIZE_SSE
-#include "src/Core/arch/SSE/PacketMath.h"
-#include "src/Core/arch/SSE/TypeCasting.h"
-#include "src/Core/arch/SSE/MathFunctions.h"
-#include "src/Core/arch/SSE/Complex.h"
-#elif defined(EIGEN_VECTORIZE_ALTIVEC) || defined(EIGEN_VECTORIZE_VSX)
+
+#if defined(EIGEN_VECTORIZE_ALTIVEC) || defined(EIGEN_VECTORIZE_VSX)
#include "src/Core/arch/AltiVec/PacketMath.h"
#include "src/Core/arch/AltiVec/TypeCasting.h"
#include "src/Core/arch/AltiVec/MathFunctions.h"
diff --git a/Eigen/src/Core/GenericPacketMath.h b/Eigen/src/Core/GenericPacketMath.h
index d45cb4b..ab9c0e1 100644
--- a/Eigen/src/Core/GenericPacketMath.h
+++ b/Eigen/src/Core/GenericPacketMath.h
@@ -608,7 +608,7 @@
/** \internal \returns the min or max of \a a and \a b (coeff-wise)
If either \a a or \a b are NaN, the result is implementation defined. */
-template <int NaNPropagation>
+template <int NaNPropagation, bool IsInteger>
struct pminmax_impl {
template <typename Packet, typename Op>
static EIGEN_DEVICE_FUNC inline Packet run(const Packet& a, const Packet& b, Op op) {
@@ -619,7 +619,7 @@
/** \internal \returns the min or max of \a a and \a b (coeff-wise)
If either \a a or \a b are NaN, NaN is returned. */
template <>
-struct pminmax_impl<PropagateNaN> {
+struct pminmax_impl<PropagateNaN, false> {
template <typename Packet, typename Op>
static EIGEN_DEVICE_FUNC inline Packet run(const Packet& a, const Packet& b, Op op) {
Packet not_nan_mask_a = pcmp_eq(a, a);
@@ -632,7 +632,7 @@
If both \a a and \a b are NaN, NaN is returned.
Equivalent to std::fmin(a, b). */
template <>
-struct pminmax_impl<PropagateNumbers> {
+struct pminmax_impl<PropagateNumbers, false> {
template <typename Packet, typename Op>
static EIGEN_DEVICE_FUNC inline Packet run(const Packet& a, const Packet& b, Op op) {
Packet not_nan_mask_a = pcmp_eq(a, a);
@@ -654,7 +654,8 @@
NaNPropagation determines the NaN propagation semantics. */
template <int NaNPropagation, typename Packet>
EIGEN_DEVICE_FUNC inline Packet pmin(const Packet& a, const Packet& b) {
- return pminmax_impl<NaNPropagation>::run(a, b, EIGEN_BINARY_OP_NAN_PROPAGATION(Packet, (pmin<Packet>)));
+ constexpr bool IsInteger = NumTraits<typename unpacket_traits<Packet>::type>::IsInteger;
+ return pminmax_impl<NaNPropagation, IsInteger>::run(a, b, EIGEN_BINARY_OP_NAN_PROPAGATION(Packet, (pmin<Packet>)));
}
/** \internal \returns the max of \a a and \a b (coeff-wise)
@@ -668,7 +669,8 @@
NaNPropagation determines the NaN propagation semantics. */
template <int NaNPropagation, typename Packet>
EIGEN_DEVICE_FUNC inline Packet pmax(const Packet& a, const Packet& b) {
- return pminmax_impl<NaNPropagation>::run(a, b, EIGEN_BINARY_OP_NAN_PROPAGATION(Packet, (pmax<Packet>)));
+ constexpr bool IsInteger = NumTraits<typename unpacket_traits<Packet>::type>::IsInteger;
+ return pminmax_impl<NaNPropagation, IsInteger>::run(a, b, EIGEN_BINARY_OP_NAN_PROPAGATION(Packet, (pmax<Packet>)));
}
/** \internal \returns the absolute value of \a a */
@@ -1244,26 +1246,46 @@
template <typename Packet>
EIGEN_DEVICE_FUNC inline typename unpacket_traits<Packet>::type predux_min(const Packet& a) {
typedef typename unpacket_traits<Packet>::type Scalar;
- return predux_helper(a, EIGEN_BINARY_OP_NAN_PROPAGATION(Scalar, (pmin<PropagateFast, Scalar>)));
+ return predux_helper(a, EIGEN_BINARY_OP_NAN_PROPAGATION(Scalar, (pmin<Scalar>)));
}
-template <int NaNPropagation, typename Packet>
-EIGEN_DEVICE_FUNC inline typename unpacket_traits<Packet>::type predux_min(const Packet& a) {
- typedef typename unpacket_traits<Packet>::type Scalar;
- return predux_helper(a, EIGEN_BINARY_OP_NAN_PROPAGATION(Scalar, (pmin<NaNPropagation, Scalar>)));
-}
-
-/** \internal \returns the min of the elements of \a a */
+/** \internal \returns the max of the elements of \a a */
template <typename Packet>
EIGEN_DEVICE_FUNC inline typename unpacket_traits<Packet>::type predux_max(const Packet& a) {
typedef typename unpacket_traits<Packet>::type Scalar;
- return predux_helper(a, EIGEN_BINARY_OP_NAN_PROPAGATION(Scalar, (pmax<PropagateFast, Scalar>)));
+ return predux_helper(a, EIGEN_BINARY_OP_NAN_PROPAGATION(Scalar, (pmax<Scalar>)));
+}
+
+template <int NaNPropagation, typename Packet>
+struct predux_min_max_helper_impl {
+ using Scalar = typename unpacket_traits<Packet>::type;
+ static constexpr bool UsePredux_ = NaNPropagation == PropagateFast || NumTraits<Scalar>::IsInteger;
+ template <bool UsePredux = UsePredux_, std::enable_if_t<!UsePredux, bool> = true>
+ static EIGEN_DEVICE_FUNC inline Scalar run_min(const Packet& a) {
+ return predux_helper(a, EIGEN_BINARY_OP_NAN_PROPAGATION(Scalar, (pmin<NaNPropagation, Scalar>)));
+ }
+ template <bool UsePredux = UsePredux_, std::enable_if_t<!UsePredux, bool> = true>
+ static EIGEN_DEVICE_FUNC inline Scalar run_max(const Packet& a) {
+ return predux_helper(a, EIGEN_BINARY_OP_NAN_PROPAGATION(Scalar, (pmax<NaNPropagation, Scalar>)));
+ }
+ template <bool UsePredux = UsePredux_, std::enable_if_t<UsePredux, bool> = true>
+ static EIGEN_DEVICE_FUNC inline Scalar run_min(const Packet& a) {
+ return predux_min(a);
+ }
+ template <bool UsePredux = UsePredux_, std::enable_if_t<UsePredux, bool> = true>
+ static EIGEN_DEVICE_FUNC inline Scalar run_max(const Packet& a) {
+ return predux_max(a);
+ }
+};
+
+template <int NaNPropagation, typename Packet>
+EIGEN_DEVICE_FUNC inline typename unpacket_traits<Packet>::type predux_min(const Packet& a) {
+ return predux_min_max_helper_impl<NaNPropagation, Packet>::run_min(a);
}
template <int NaNPropagation, typename Packet>
EIGEN_DEVICE_FUNC inline typename unpacket_traits<Packet>::type predux_max(const Packet& a) {
- typedef typename unpacket_traits<Packet>::type Scalar;
- return predux_helper(a, EIGEN_BINARY_OP_NAN_PROPAGATION(Scalar, (pmax<NaNPropagation, Scalar>)));
+ return predux_min_max_helper_impl<NaNPropagation, Packet>::run_max(a);
}
#undef EIGEN_BINARY_OP_NAN_PROPAGATION
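
Usage sketch for the dispatch introduced by predux_min_max_helper_impl, assuming an SSE-enabled build: PropagateFast and integer packets fall through to the plain predux_min/predux_max overloads, while the floating-point modes keep their distinct NaN semantics:

#include <Eigen/Core>
#include <iostream>
#include <limits>

int main() {
  using namespace Eigen::internal;
  const float nan = std::numeric_limits<float>::quiet_NaN();
  const float data[4] = {1.0f, nan, -2.0f, 5.0f};
  Packet4f p = ploadu<Packet4f>(data);
  std::cout << predux_min<Eigen::PropagateNumbers>(p) << "\n";  // -2 (NaN ignored)
  std::cout << predux_min<Eigen::PropagateNaN>(p) << "\n";      // nan (NaN wins)
  return 0;
}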
diff --git a/Eigen/src/Core/arch/AVX/PacketMath.h b/Eigen/src/Core/arch/AVX/PacketMath.h
index 470e36d..1b1d326 100644
--- a/Eigen/src/Core/arch/AVX/PacketMath.h
+++ b/Eigen/src/Core/arch/AVX/PacketMath.h
@@ -654,25 +654,6 @@
EIGEN_STRONG_INLINE uint64_t pfirst<Packet4ul>(const Packet4ul& a) {
return _mm_extract_epi64_0(_mm256_castsi256_si128(a));
}
-template <>
-EIGEN_STRONG_INLINE int64_t predux<Packet4l>(const Packet4l& a) {
- __m128i r = _mm_add_epi64(_mm256_castsi256_si128(a), _mm256_extractf128_si256(a, 1));
- return _mm_extract_epi64_0(r) + _mm_extract_epi64_1(r);
-}
-template <>
-EIGEN_STRONG_INLINE uint64_t predux<Packet4ul>(const Packet4ul& a) {
- __m128i r = _mm_add_epi64(_mm256_castsi256_si128(a), _mm256_extractf128_si256(a, 1));
- return numext::bit_cast<uint64_t>(_mm_extract_epi64_0(r) + _mm_extract_epi64_1(r));
-}
-
-template <>
-EIGEN_STRONG_INLINE bool predux_any(const Packet4l& a) {
- return _mm256_movemask_pd(_mm256_castsi256_pd(a)) != 0;
-}
-template <>
-EIGEN_STRONG_INLINE bool predux_any(const Packet4ul& a) {
- return _mm256_movemask_pd(_mm256_castsi256_pd(a)) != 0;
-}
#define MM256_SHUFFLE_EPI64(A, B, M) _mm256_shuffle_pd(_mm256_castsi256_pd(A), _mm256_castsi256_pd(B), M)
EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet4l, 4>& kernel) {
@@ -1956,23 +1937,6 @@
}
template <>
-EIGEN_STRONG_INLINE float predux<Packet8f>(const Packet8f& a) {
- return predux(Packet4f(_mm_add_ps(_mm256_castps256_ps128(a), _mm256_extractf128_ps(a, 1))));
-}
-template <>
-EIGEN_STRONG_INLINE double predux<Packet4d>(const Packet4d& a) {
- return predux(Packet2d(_mm_add_pd(_mm256_castpd256_pd128(a), _mm256_extractf128_pd(a, 1))));
-}
-template <>
-EIGEN_STRONG_INLINE int predux<Packet8i>(const Packet8i& a) {
- return predux(Packet4i(_mm_add_epi32(_mm256_castsi256_si128(a), _mm256_extractf128_si256(a, 1))));
-}
-template <>
-EIGEN_STRONG_INLINE uint32_t predux<Packet8ui>(const Packet8ui& a) {
- return predux(Packet4ui(_mm_add_epi32(_mm256_castsi256_si128(a), _mm256_extractf128_si256(a, 1))));
-}
-
-template <>
EIGEN_STRONG_INLINE Packet4f predux_half_dowto4<Packet8f>(const Packet8f& a) {
return _mm_add_ps(_mm256_castps256_ps128(a), _mm256_extractf128_ps(a, 1));
}
@@ -1985,82 +1949,6 @@
return _mm_add_epi32(_mm256_castsi256_si128(a), _mm256_extractf128_si256(a, 1));
}
-template <>
-EIGEN_STRONG_INLINE float predux_mul<Packet8f>(const Packet8f& a) {
- Packet8f tmp;
- tmp = _mm256_mul_ps(a, _mm256_permute2f128_ps(a, a, 1));
- tmp = _mm256_mul_ps(tmp, _mm256_shuffle_ps(tmp, tmp, _MM_SHUFFLE(1, 0, 3, 2)));
- return pfirst(_mm256_mul_ps(tmp, _mm256_shuffle_ps(tmp, tmp, 1)));
-}
-template <>
-EIGEN_STRONG_INLINE double predux_mul<Packet4d>(const Packet4d& a) {
- Packet4d tmp;
- tmp = _mm256_mul_pd(a, _mm256_permute2f128_pd(a, a, 1));
- return pfirst(_mm256_mul_pd(tmp, _mm256_shuffle_pd(tmp, tmp, 1)));
-}
-
-template <>
-EIGEN_STRONG_INLINE float predux_min<Packet8f>(const Packet8f& a) {
- Packet8f tmp = _mm256_min_ps(a, _mm256_permute2f128_ps(a, a, 1));
- tmp = _mm256_min_ps(tmp, _mm256_shuffle_ps(tmp, tmp, _MM_SHUFFLE(1, 0, 3, 2)));
- return pfirst(_mm256_min_ps(tmp, _mm256_shuffle_ps(tmp, tmp, 1)));
-}
-template <>
-EIGEN_STRONG_INLINE double predux_min<Packet4d>(const Packet4d& a) {
- Packet4d tmp = _mm256_min_pd(a, _mm256_permute2f128_pd(a, a, 1));
- return pfirst(_mm256_min_pd(tmp, _mm256_shuffle_pd(tmp, tmp, 1)));
-}
-
-template <>
-EIGEN_STRONG_INLINE float predux_max<Packet8f>(const Packet8f& a) {
- Packet8f tmp = _mm256_max_ps(a, _mm256_permute2f128_ps(a, a, 1));
- tmp = _mm256_max_ps(tmp, _mm256_shuffle_ps(tmp, tmp, _MM_SHUFFLE(1, 0, 3, 2)));
- return pfirst(_mm256_max_ps(tmp, _mm256_shuffle_ps(tmp, tmp, 1)));
-}
-
-template <>
-EIGEN_STRONG_INLINE double predux_max<Packet4d>(const Packet4d& a) {
- Packet4d tmp = _mm256_max_pd(a, _mm256_permute2f128_pd(a, a, 1));
- return pfirst(_mm256_max_pd(tmp, _mm256_shuffle_pd(tmp, tmp, 1)));
-}
-
-// not needed yet
-// template<> EIGEN_STRONG_INLINE bool predux_all(const Packet8f& x)
-// {
-// return _mm256_movemask_ps(x)==0xFF;
-// }
-
-template <>
-EIGEN_STRONG_INLINE bool predux_any(const Packet8f& x) {
- return _mm256_movemask_ps(x) != 0;
-}
-
-template <>
-EIGEN_STRONG_INLINE bool predux_any(const Packet4d& x) {
- return _mm256_movemask_pd(x) != 0;
-}
-
-template <>
-EIGEN_STRONG_INLINE bool predux_any(const Packet8i& x) {
- return _mm256_movemask_ps(_mm256_castsi256_ps(x)) != 0;
-}
-template <>
-EIGEN_STRONG_INLINE bool predux_any(const Packet8ui& x) {
- return _mm256_movemask_ps(_mm256_castsi256_ps(x)) != 0;
-}
-
-#ifndef EIGEN_VECTORIZE_AVX512FP16
-template <>
-EIGEN_STRONG_INLINE bool predux_any(const Packet8h& x) {
- return _mm_movemask_epi8(x) != 0;
-}
-#endif // EIGEN_VECTORIZE_AVX512FP16
-
-template <>
-EIGEN_STRONG_INLINE bool predux_any(const Packet8bf& x) {
- return _mm_movemask_epi8(x) != 0;
-}
-
EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet8f, 8>& kernel) {
__m256 T0 = _mm256_unpacklo_ps(kernel.packet[0], kernel.packet[1]);
__m256 T1 = _mm256_unpackhi_ps(kernel.packet[0], kernel.packet[1]);
@@ -2474,34 +2362,6 @@
}
template <>
-EIGEN_STRONG_INLINE Eigen::half predux<Packet8h>(const Packet8h& a) {
- Packet8f af = half2float(a);
- float reduced = predux<Packet8f>(af);
- return Eigen::half(reduced);
-}
-
-template <>
-EIGEN_STRONG_INLINE Eigen::half predux_max<Packet8h>(const Packet8h& a) {
- Packet8f af = half2float(a);
- float reduced = predux_max<Packet8f>(af);
- return Eigen::half(reduced);
-}
-
-template <>
-EIGEN_STRONG_INLINE Eigen::half predux_min<Packet8h>(const Packet8h& a) {
- Packet8f af = half2float(a);
- float reduced = predux_min<Packet8f>(af);
- return Eigen::half(reduced);
-}
-
-template <>
-EIGEN_STRONG_INLINE Eigen::half predux_mul<Packet8h>(const Packet8h& a) {
- Packet8f af = half2float(a);
- float reduced = predux_mul<Packet8f>(af);
- return Eigen::half(reduced);
-}
-
-template <>
EIGEN_STRONG_INLINE Packet8h preverse(const Packet8h& a) {
__m128i m = _mm_setr_epi8(14, 15, 12, 13, 10, 11, 8, 9, 6, 7, 4, 5, 2, 3, 0, 1);
return _mm_shuffle_epi8(a, m);
@@ -2860,26 +2720,6 @@
}
template <>
-EIGEN_STRONG_INLINE bfloat16 predux<Packet8bf>(const Packet8bf& a) {
- return static_cast<bfloat16>(predux<Packet8f>(Bf16ToF32(a)));
-}
-
-template <>
-EIGEN_STRONG_INLINE bfloat16 predux_max<Packet8bf>(const Packet8bf& a) {
- return static_cast<bfloat16>(predux_max<Packet8f>(Bf16ToF32(a)));
-}
-
-template <>
-EIGEN_STRONG_INLINE bfloat16 predux_min<Packet8bf>(const Packet8bf& a) {
- return static_cast<bfloat16>(predux_min<Packet8f>(Bf16ToF32(a)));
-}
-
-template <>
-EIGEN_STRONG_INLINE bfloat16 predux_mul<Packet8bf>(const Packet8bf& a) {
- return static_cast<bfloat16>(predux_mul<Packet8f>(Bf16ToF32(a)));
-}
-
-template <>
EIGEN_STRONG_INLINE Packet8bf preverse(const Packet8bf& a) {
__m128i m = _mm_setr_epi8(14, 15, 12, 13, 10, 11, 8, 9, 6, 7, 4, 5, 2, 3, 0, 1);
return _mm_shuffle_epi8(a, m);
diff --git a/Eigen/src/Core/arch/AVX/Reductions.h b/Eigen/src/Core/arch/AVX/Reductions.h
new file mode 100644
index 0000000..237617c
--- /dev/null
+++ b/Eigen/src/Core/arch/AVX/Reductions.h
@@ -0,0 +1,353 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2025 Charlie Schlosser <cs.schlosser@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_REDUCTIONS_AVX_H
+#define EIGEN_REDUCTIONS_AVX_H
+
+// IWYU pragma: private
+#include "../../InternalHeaderCheck.h"
+
+namespace Eigen {
+
+namespace internal {
+
+/* -- -- -- -- -- -- -- -- -- -- -- -- Packet8i -- -- -- -- -- -- -- -- -- -- -- -- */
+
+template <>
+EIGEN_STRONG_INLINE int predux(const Packet8i& a) {
+ Packet4i lo = _mm256_castsi256_si128(a);
+ Packet4i hi = _mm256_extractf128_si256(a, 1);
+ return predux(padd(lo, hi));
+}
+
+template <>
+EIGEN_STRONG_INLINE int predux_mul(const Packet8i& a) {
+ Packet4i lo = _mm256_castsi256_si128(a);
+ Packet4i hi = _mm256_extractf128_si256(a, 1);
+ return predux_mul(pmul(lo, hi));
+}
+
+template <>
+EIGEN_STRONG_INLINE int predux_min(const Packet8i& a) {
+ Packet4i lo = _mm256_castsi256_si128(a);
+ Packet4i hi = _mm256_extractf128_si256(a, 1);
+ return predux_min(pmin(lo, hi));
+}
+
+template <>
+EIGEN_STRONG_INLINE int predux_max(const Packet8i& a) {
+ Packet4i lo = _mm256_castsi256_si128(a);
+ Packet4i hi = _mm256_extractf128_si256(a, 1);
+ return predux_max(pmax(lo, hi));
+}
+
+template <>
+EIGEN_STRONG_INLINE bool predux_any(const Packet8i& a) {
+#ifdef EIGEN_VECTORIZE_AVX2
+ return _mm256_movemask_epi8(a) != 0x0;
+#else
+ return _mm256_movemask_ps(_mm256_castsi256_ps(a)) != 0x0;
+#endif
+}
+
+/* -- -- -- -- -- -- -- -- -- -- -- -- Packet8ui -- -- -- -- -- -- -- -- -- -- -- -- */
+
+template <>
+EIGEN_STRONG_INLINE uint32_t predux(const Packet8ui& a) {
+ Packet4ui lo = _mm256_castsi256_si128(a);
+ Packet4ui hi = _mm256_extractf128_si256(a, 1);
+ return predux(padd(lo, hi));
+}
+
+template <>
+EIGEN_STRONG_INLINE uint32_t predux_mul(const Packet8ui& a) {
+ Packet4ui lo = _mm256_castsi256_si128(a);
+ Packet4ui hi = _mm256_extractf128_si256(a, 1);
+ return predux_mul(pmul(lo, hi));
+}
+
+template <>
+EIGEN_STRONG_INLINE uint32_t predux_min(const Packet8ui& a) {
+ Packet4ui lo = _mm256_castsi256_si128(a);
+ Packet4ui hi = _mm256_extractf128_si256(a, 1);
+ return predux_min(pmin(lo, hi));
+}
+
+template <>
+EIGEN_STRONG_INLINE uint32_t predux_max(const Packet8ui& a) {
+ Packet4ui lo = _mm256_castsi256_si128(a);
+ Packet4ui hi = _mm256_extractf128_si256(a, 1);
+ return predux_max(pmax(lo, hi));
+}
+
+template <>
+EIGEN_STRONG_INLINE bool predux_any(const Packet8ui& a) {
+#ifdef EIGEN_VECTORIZE_AVX2
+ return _mm256_movemask_epi8(a) != 0x0;
+#else
+ return _mm256_movemask_ps(_mm256_castsi256_ps(a)) != 0x0;
+#endif
+}
+
+#ifdef EIGEN_VECTORIZE_AVX2
+
+/* -- -- -- -- -- -- -- -- -- -- -- -- Packet4l -- -- -- -- -- -- -- -- -- -- -- -- */
+
+template <>
+EIGEN_STRONG_INLINE int64_t predux(const Packet4l& a) {
+ Packet2l lo = _mm256_castsi256_si128(a);
+ Packet2l hi = _mm256_extractf128_si256(a, 1);
+ return predux(padd(lo, hi));
+}
+
+template <>
+EIGEN_STRONG_INLINE bool predux_any(const Packet4l& a) {
+ return _mm256_movemask_pd(_mm256_castsi256_pd(a)) != 0x0;
+}
+
+/* -- -- -- -- -- -- -- -- -- -- -- -- Packet4ul -- -- -- -- -- -- -- -- -- -- -- -- */
+
+template <>
+EIGEN_STRONG_INLINE uint64_t predux(const Packet4ul& a) {
+ return static_cast<uint64_t>(predux(Packet4l(a)));
+}
+
+template <>
+EIGEN_STRONG_INLINE bool predux_any(const Packet4ul& a) {
+ return _mm256_movemask_pd(_mm256_castsi256_pd(a)) != 0x0;
+}
+
+#endif
+
+/* -- -- -- -- -- -- -- -- -- -- -- -- Packet8f -- -- -- -- -- -- -- -- -- -- -- -- */
+
+template <>
+EIGEN_STRONG_INLINE float predux(const Packet8f& a) {
+ Packet4f lo = _mm256_castps256_ps128(a);
+ Packet4f hi = _mm256_extractf128_ps(a, 1);
+ return predux(padd(lo, hi));
+}
+
+template <>
+EIGEN_STRONG_INLINE float predux_mul(const Packet8f& a) {
+ Packet4f lo = _mm256_castps256_ps128(a);
+ Packet4f hi = _mm256_extractf128_ps(a, 1);
+ return predux_mul(pmul(lo, hi));
+}
+
+template <>
+EIGEN_STRONG_INLINE float predux_min(const Packet8f& a) {
+ Packet4f lo = _mm256_castps256_ps128(a);
+ Packet4f hi = _mm256_extractf128_ps(a, 1);
+ return predux_min(pmin(lo, hi));
+}
+
+template <>
+EIGEN_STRONG_INLINE float predux_min<PropagateNumbers>(const Packet8f& a) {
+ Packet4f lo = _mm256_castps256_ps128(a);
+ Packet4f hi = _mm256_extractf128_ps(a, 1);
+ return predux_min<PropagateNumbers>(pmin<PropagateNumbers>(lo, hi));
+}
+
+template <>
+EIGEN_STRONG_INLINE float predux_min<PropagateNaN>(const Packet8f& a) {
+ Packet4f lo = _mm256_castps256_ps128(a);
+ Packet4f hi = _mm256_extractf128_ps(a, 1);
+ return predux_min<PropagateNaN>(pmin<PropagateNaN>(lo, hi));
+}
+
+template <>
+EIGEN_STRONG_INLINE float predux_max(const Packet8f& a) {
+ Packet4f lo = _mm256_castps256_ps128(a);
+ Packet4f hi = _mm256_extractf128_ps(a, 1);
+ return predux_max(pmax(lo, hi));
+}
+
+template <>
+EIGEN_STRONG_INLINE float predux_max<PropagateNumbers>(const Packet8f& a) {
+ Packet4f lo = _mm256_castps256_ps128(a);
+ Packet4f hi = _mm256_extractf128_ps(a, 1);
+ return predux_max<PropagateNumbers>(pmax<PropagateNumbers>(lo, hi));
+}
+
+template <>
+EIGEN_STRONG_INLINE float predux_max<PropagateNaN>(const Packet8f& a) {
+ Packet4f lo = _mm256_castps256_ps128(a);
+ Packet4f hi = _mm256_extractf128_ps(a, 1);
+ return predux_max<PropagateNaN>(pmax<PropagateNaN>(lo, hi));
+}
+
+template <>
+EIGEN_STRONG_INLINE bool predux_any(const Packet8f& a) {
+ return _mm256_movemask_ps(a) != 0x0;
+}
+
+/* -- -- -- -- -- -- -- -- -- -- -- -- Packet4d -- -- -- -- -- -- -- -- -- -- -- -- */
+
+template <>
+EIGEN_STRONG_INLINE double predux(const Packet4d& a) {
+ Packet2d lo = _mm256_castpd256_pd128(a);
+ Packet2d hi = _mm256_extractf128_pd(a, 1);
+ return predux(padd(lo, hi));
+}
+
+template <>
+EIGEN_STRONG_INLINE double predux_mul(const Packet4d& a) {
+ Packet2d lo = _mm256_castpd256_pd128(a);
+ Packet2d hi = _mm256_extractf128_pd(a, 1);
+ return predux_mul(pmul(lo, hi));
+}
+
+template <>
+EIGEN_STRONG_INLINE double predux_min(const Packet4d& a) {
+ Packet2d lo = _mm256_castpd256_pd128(a);
+ Packet2d hi = _mm256_extractf128_pd(a, 1);
+ return predux_min(pmin(lo, hi));
+}
+
+template <>
+EIGEN_STRONG_INLINE double predux_min<PropagateNumbers>(const Packet4d& a) {
+ Packet2d lo = _mm256_castpd256_pd128(a);
+ Packet2d hi = _mm256_extractf128_pd(a, 1);
+ return predux_min<PropagateNumbers>(pmin<PropagateNumbers>(lo, hi));
+}
+
+template <>
+EIGEN_STRONG_INLINE double predux_min<PropagateNaN>(const Packet4d& a) {
+ Packet2d lo = _mm256_castpd256_pd128(a);
+ Packet2d hi = _mm256_extractf128_pd(a, 1);
+ return predux_min<PropagateNaN>(pmin<PropagateNaN>(lo, hi));
+}
+
+template <>
+EIGEN_STRONG_INLINE double predux_max(const Packet4d& a) {
+ Packet2d lo = _mm256_castpd256_pd128(a);
+ Packet2d hi = _mm256_extractf128_pd(a, 1);
+ return predux_max(pmax(lo, hi));
+}
+
+template <>
+EIGEN_STRONG_INLINE double predux_max<PropagateNumbers>(const Packet4d& a) {
+ Packet2d lo = _mm256_castpd256_pd128(a);
+ Packet2d hi = _mm256_extractf128_pd(a, 1);
+ return predux_max<PropagateNumbers>(pmax<PropagateNumbers>(lo, hi));
+}
+
+template <>
+EIGEN_STRONG_INLINE double predux_max<PropagateNaN>(const Packet4d& a) {
+ Packet2d lo = _mm256_castpd256_pd128(a);
+ Packet2d hi = _mm256_extractf128_pd(a, 1);
+ return predux_max<PropagateNaN>(pmax<PropagateNaN>(lo, hi));
+}
+
+template <>
+EIGEN_STRONG_INLINE bool predux_any(const Packet4d& a) {
+ return _mm256_movemask_pd(a) != 0x0;
+}
+
+/* -- -- -- -- -- -- -- -- -- -- -- -- Packet8h -- -- -- -- -- -- -- -- -- -- -- -- */
+#ifndef EIGEN_VECTORIZE_AVX512FP16
+
+template <>
+EIGEN_STRONG_INLINE half predux(const Packet8h& a) {
+ return static_cast<half>(predux(half2float(a)));
+}
+
+template <>
+EIGEN_STRONG_INLINE half predux_mul(const Packet8h& a) {
+ return static_cast<half>(predux_mul(half2float(a)));
+}
+
+template <>
+EIGEN_STRONG_INLINE half predux_min(const Packet8h& a) {
+ return static_cast<half>(predux_min(half2float(a)));
+}
+
+template <>
+EIGEN_STRONG_INLINE half predux_min<PropagateNumbers>(const Packet8h& a) {
+ return static_cast<half>(predux_min<PropagateNumbers>(half2float(a)));
+}
+
+template <>
+EIGEN_STRONG_INLINE half predux_min<PropagateNaN>(const Packet8h& a) {
+ return static_cast<half>(predux_min<PropagateNaN>(half2float(a)));
+}
+
+template <>
+EIGEN_STRONG_INLINE half predux_max(const Packet8h& a) {
+ return static_cast<half>(predux_max(half2float(a)));
+}
+
+template <>
+EIGEN_STRONG_INLINE half predux_max<PropagateNumbers>(const Packet8h& a) {
+ return static_cast<half>(predux_max<PropagateNumbers>(half2float(a)));
+}
+
+template <>
+EIGEN_STRONG_INLINE half predux_max<PropagateNaN>(const Packet8h& a) {
+ return static_cast<half>(predux_max<PropagateNaN>(half2float(a)));
+}
+
+template <>
+EIGEN_STRONG_INLINE bool predux_any(const Packet8h& a) {
+ return _mm_movemask_epi8(a) != 0;
+}
+#endif // EIGEN_VECTORIZE_AVX512FP16
+
+/* -- -- -- -- -- -- -- -- -- -- -- -- Packet8bf -- -- -- -- -- -- -- -- -- -- -- -- */
+
+template <>
+EIGEN_STRONG_INLINE bfloat16 predux(const Packet8bf& a) {
+ return static_cast<bfloat16>(predux<Packet8f>(Bf16ToF32(a)));
+}
+
+template <>
+EIGEN_STRONG_INLINE bfloat16 predux_mul(const Packet8bf& a) {
+ return static_cast<bfloat16>(predux_mul<Packet8f>(Bf16ToF32(a)));
+}
+
+template <>
+EIGEN_STRONG_INLINE bfloat16 predux_min(const Packet8bf& a) {
+ return static_cast<bfloat16>(predux_min(Bf16ToF32(a)));
+}
+
+template <>
+EIGEN_STRONG_INLINE bfloat16 predux_min<PropagateNumbers>(const Packet8bf& a) {
+ return static_cast<bfloat16>(predux_min<PropagateNumbers>(Bf16ToF32(a)));
+}
+
+template <>
+EIGEN_STRONG_INLINE bfloat16 predux_min<PropagateNaN>(const Packet8bf& a) {
+ return static_cast<bfloat16>(predux_min<PropagateNaN>(Bf16ToF32(a)));
+}
+
+template <>
+EIGEN_STRONG_INLINE bfloat16 predux_max(const Packet8bf& a) {
+ return static_cast<bfloat16>(predux_max<Packet8f>(Bf16ToF32(a)));
+}
+
+template <>
+EIGEN_STRONG_INLINE bfloat16 predux_max<PropagateNumbers>(const Packet8bf& a) {
+ return static_cast<bfloat16>(predux_max<PropagateNumbers>(Bf16ToF32(a)));
+}
+
+template <>
+EIGEN_STRONG_INLINE bfloat16 predux_max<PropagateNaN>(const Packet8bf& a) {
+ return static_cast<bfloat16>(predux_max<PropagateNaN>(Bf16ToF32(a)));
+}
+
+template <>
+EIGEN_STRONG_INLINE bool predux_any(const Packet8bf& a) {
+ return _mm_movemask_epi8(a) != 0;
+}
+
+} // end namespace internal
+} // end namespace Eigen
+
+#endif // EIGEN_REDUCTIONS_AVX_H
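
The recurring pattern in this new header: each AVX reduction folds the 256-bit register into its two 128-bit halves and defers to the SSE overload. A small usage sketch, assuming an AVX-enabled build:

#include <Eigen/Core>
#include <iostream>

int main() {
  using namespace Eigen::internal;
  alignas(32) const float data[8] = {1, 2, 3, 4, 5, 6, 7, 8};
  Packet8f p = pload<Packet8f>(data);
  std::cout << predux(p) << "\n";      // 36: lo+hi halves, then SSE predux
  std::cout << predux_max(p) << "\n";  // 8
  std::cout << predux_any(p) << "\n";  // 0: no lane has its sign bit set
  return 0;
}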
diff --git a/Eigen/src/Core/arch/AVX512/PacketMath.h b/Eigen/src/Core/arch/AVX512/PacketMath.h
index 27a0f10..932b056 100644
--- a/Eigen/src/Core/arch/AVX512/PacketMath.h
+++ b/Eigen/src/Core/arch/AVX512/PacketMath.h
@@ -1495,40 +1495,6 @@
#endif
template <>
-EIGEN_STRONG_INLINE float predux<Packet16f>(const Packet16f& a) {
-#ifdef EIGEN_VECTORIZE_AVX512DQ
- __m256 lane0 = _mm512_extractf32x8_ps(a, 0);
- __m256 lane1 = _mm512_extractf32x8_ps(a, 1);
- Packet8f x = _mm256_add_ps(lane0, lane1);
- return predux<Packet8f>(x);
-#else
- __m128 lane0 = _mm512_extractf32x4_ps(a, 0);
- __m128 lane1 = _mm512_extractf32x4_ps(a, 1);
- __m128 lane2 = _mm512_extractf32x4_ps(a, 2);
- __m128 lane3 = _mm512_extractf32x4_ps(a, 3);
- __m128 sum = _mm_add_ps(_mm_add_ps(lane0, lane1), _mm_add_ps(lane2, lane3));
- return predux<Packet4f>(sum);
-#endif
-}
-template <>
-EIGEN_STRONG_INLINE double predux<Packet8d>(const Packet8d& a) {
- __m256d lane0 = _mm512_extractf64x4_pd(a, 0);
- __m256d lane1 = _mm512_extractf64x4_pd(a, 1);
- __m256d sum = _mm256_add_pd(lane0, lane1);
- return predux<Packet4d>(sum);
-}
-
-template <>
-EIGEN_STRONG_INLINE int64_t predux<Packet8l>(const Packet8l& a) {
- return _mm512_reduce_add_epi64(a);
-}
-
-template <>
-EIGEN_STRONG_INLINE int predux<Packet16i>(const Packet16i& a) {
- return _mm512_reduce_add_epi32(a);
-}
-
-template <>
EIGEN_STRONG_INLINE Packet8f predux_half_dowto4<Packet16f>(const Packet16f& a) {
#ifdef EIGEN_VECTORIZE_AVX512DQ
__m256 lane0 = _mm512_extractf32x8_ps(a, 0);
@@ -1574,136 +1540,6 @@
return _mm256_add_epi64(lane0, lane1);
}
-template <>
-EIGEN_STRONG_INLINE float predux_mul<Packet16f>(const Packet16f& a) {
-// #ifdef EIGEN_VECTORIZE_AVX512DQ
-#if 0
- Packet8f lane0 = _mm512_extractf32x8_ps(a, 0);
- Packet8f lane1 = _mm512_extractf32x8_ps(a, 1);
- Packet8f res = pmul(lane0, lane1);
- res = pmul(res, _mm256_permute2f128_ps(res, res, 1));
- res = pmul(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 3, 2)));
- return pfirst(pmul(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 0, 1))));
-#else
- __m128 lane0 = _mm512_extractf32x4_ps(a, 0);
- __m128 lane1 = _mm512_extractf32x4_ps(a, 1);
- __m128 lane2 = _mm512_extractf32x4_ps(a, 2);
- __m128 lane3 = _mm512_extractf32x4_ps(a, 3);
- __m128 res = pmul(pmul(lane0, lane1), pmul(lane2, lane3));
- res = pmul(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 3, 2)));
- return pfirst(pmul(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 0, 1))));
-#endif
-}
-template <>
-EIGEN_STRONG_INLINE double predux_mul<Packet8d>(const Packet8d& a) {
- __m256d lane0 = _mm512_extractf64x4_pd(a, 0);
- __m256d lane1 = _mm512_extractf64x4_pd(a, 1);
- __m256d res = pmul(lane0, lane1);
- res = pmul(res, _mm256_permute2f128_pd(res, res, 1));
- return pfirst(pmul(res, _mm256_shuffle_pd(res, res, 1)));
-}
-template <>
-EIGEN_STRONG_INLINE int predux_mul<Packet16i>(const Packet16i& a) {
- return _mm512_reduce_mul_epi32(a);
-}
-
-#if EIGEN_COMP_MSVC
-// MSVC's _mm512_reduce_mul_epi64 is borked, at least up to and including 1939.
-// alignas(64) int64_t data[] = { 1,1,-1,-1,1,-1,-1,-1 };
-// int64_t out = _mm512_reduce_mul_epi64(_mm512_load_epi64(data));
-// produces garbage: 4294967295. It seems to happen whenever the output is supposed to be negative.
-// Fall back to a manual approach:
-template <>
-EIGEN_STRONG_INLINE int64_t predux_mul<Packet8l>(const Packet8l& a) {
- Packet4l lane0 = _mm512_extracti64x4_epi64(a, 0);
- Packet4l lane1 = _mm512_extracti64x4_epi64(a, 1);
- Packet4l res = pmul(lane0, lane1);
- res = pmul(res, Packet4l(_mm256_permute2x128_si256(res, res, 1)));
- res = pmul(res, Packet4l(_mm256_shuffle_epi32(res, 0xE)));
- return pfirst(res);
-}
-#else
-template <>
-EIGEN_STRONG_INLINE int64_t predux_mul<Packet8l>(const Packet8l& a) {
- return _mm512_reduce_mul_epi64(a);
-}
-#endif
-
-template <>
-EIGEN_STRONG_INLINE float predux_min<Packet16f>(const Packet16f& a) {
- __m128 lane0 = _mm512_extractf32x4_ps(a, 0);
- __m128 lane1 = _mm512_extractf32x4_ps(a, 1);
- __m128 lane2 = _mm512_extractf32x4_ps(a, 2);
- __m128 lane3 = _mm512_extractf32x4_ps(a, 3);
- __m128 res = _mm_min_ps(_mm_min_ps(lane0, lane1), _mm_min_ps(lane2, lane3));
- res = _mm_min_ps(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 3, 2)));
- return pfirst(_mm_min_ps(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 0, 1))));
-}
-template <>
-EIGEN_STRONG_INLINE double predux_min<Packet8d>(const Packet8d& a) {
- __m256d lane0 = _mm512_extractf64x4_pd(a, 0);
- __m256d lane1 = _mm512_extractf64x4_pd(a, 1);
- __m256d res = _mm256_min_pd(lane0, lane1);
- res = _mm256_min_pd(res, _mm256_permute2f128_pd(res, res, 1));
- return pfirst(_mm256_min_pd(res, _mm256_shuffle_pd(res, res, 1)));
-}
-template <>
-EIGEN_STRONG_INLINE int predux_min<Packet16i>(const Packet16i& a) {
- return _mm512_reduce_min_epi32(a);
-}
-template <>
-EIGEN_STRONG_INLINE int64_t predux_min<Packet8l>(const Packet8l& a) {
- return _mm512_reduce_min_epi64(a);
-}
-
-template <>
-EIGEN_STRONG_INLINE float predux_max<Packet16f>(const Packet16f& a) {
- __m128 lane0 = _mm512_extractf32x4_ps(a, 0);
- __m128 lane1 = _mm512_extractf32x4_ps(a, 1);
- __m128 lane2 = _mm512_extractf32x4_ps(a, 2);
- __m128 lane3 = _mm512_extractf32x4_ps(a, 3);
- __m128 res = _mm_max_ps(_mm_max_ps(lane0, lane1), _mm_max_ps(lane2, lane3));
- res = _mm_max_ps(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 3, 2)));
- return pfirst(_mm_max_ps(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 0, 1))));
-}
-
-template <>
-EIGEN_STRONG_INLINE double predux_max<Packet8d>(const Packet8d& a) {
- __m256d lane0 = _mm512_extractf64x4_pd(a, 0);
- __m256d lane1 = _mm512_extractf64x4_pd(a, 1);
- __m256d res = _mm256_max_pd(lane0, lane1);
- res = _mm256_max_pd(res, _mm256_permute2f128_pd(res, res, 1));
- return pfirst(_mm256_max_pd(res, _mm256_shuffle_pd(res, res, 1)));
-}
-template <>
-EIGEN_STRONG_INLINE int predux_max<Packet16i>(const Packet16i& a) {
- return _mm512_reduce_max_epi32(a);
-}
-template <>
-EIGEN_STRONG_INLINE int64_t predux_max<Packet8l>(const Packet8l& a) {
- return _mm512_reduce_max_epi64(a);
-}
-
-template <>
-EIGEN_STRONG_INLINE bool predux_any(const Packet16f& a) {
- return _mm512_reduce_or_epi32(_mm512_castps_si512(a)) != 0;
-}
-
-template <>
-EIGEN_STRONG_INLINE bool predux_any(const Packet16i& a) {
- return _mm512_reduce_or_epi32(a) != 0;
-}
-
-template <>
-EIGEN_STRONG_INLINE bool predux_any(const Packet8d& a) {
- return _mm512_reduce_or_epi64(_mm512_castpd_si512(a)) != 0;
-}
-
-template <>
-EIGEN_STRONG_INLINE bool predux_any(const Packet8l& a) {
- return _mm512_reduce_or_epi64(a) != 0;
-}
-
#define PACK_OUTPUT(OUTPUT, INPUT, INDEX, STRIDE) \
EIGEN_INSERT_8f_INTO_16f(OUTPUT[INDEX], INPUT[INDEX], INPUT[INDEX + STRIDE]);
@@ -2467,12 +2303,6 @@
}
template <>
-EIGEN_STRONG_INLINE half predux<Packet16h>(const Packet16h& from) {
- Packet16f from_float = half2float(from);
- return half(predux(from_float));
-}
-
-template <>
EIGEN_STRONG_INLINE Packet8h predux_half_dowto4<Packet16h>(const Packet16h& a) {
Packet8h lane0 = _mm256_extractf128_si256(a, 0);
Packet8h lane1 = _mm256_extractf128_si256(a, 1);
@@ -2480,26 +2310,6 @@
}
template <>
-EIGEN_STRONG_INLINE Eigen::half predux_max<Packet16h>(const Packet16h& a) {
- Packet16f af = half2float(a);
- float reduced = predux_max<Packet16f>(af);
- return Eigen::half(reduced);
-}
-
-template <>
-EIGEN_STRONG_INLINE Eigen::half predux_min<Packet16h>(const Packet16h& a) {
- Packet16f af = half2float(a);
- float reduced = predux_min<Packet16f>(af);
- return Eigen::half(reduced);
-}
-
-template <>
-EIGEN_STRONG_INLINE half predux_mul<Packet16h>(const Packet16h& from) {
- Packet16f from_float = half2float(from);
- return half(predux_mul(from_float));
-}
-
-template <>
EIGEN_STRONG_INLINE Packet16h preverse(const Packet16h& a) {
__m128i m = _mm_setr_epi8(14, 15, 12, 13, 10, 11, 8, 9, 6, 7, 4, 5, 2, 3, 0, 1);
return _mm256_insertf128_si256(_mm256_castsi128_si256(_mm_shuffle_epi8(_mm256_extractf128_si256(a, 1), m)),
@@ -3006,26 +2816,6 @@
}
template <>
-EIGEN_STRONG_INLINE bfloat16 predux<Packet16bf>(const Packet16bf& p) {
- return static_cast<bfloat16>(predux<Packet16f>(Bf16ToF32(p)));
-}
-
-template <>
-EIGEN_STRONG_INLINE bfloat16 predux_mul<Packet16bf>(const Packet16bf& from) {
- return static_cast<bfloat16>(predux_mul<Packet16f>(Bf16ToF32(from)));
-}
-
-template <>
-EIGEN_STRONG_INLINE bfloat16 predux_min<Packet16bf>(const Packet16bf& from) {
- return static_cast<bfloat16>(predux_min<Packet16f>(Bf16ToF32(from)));
-}
-
-template <>
-EIGEN_STRONG_INLINE bfloat16 predux_max<Packet16bf>(const Packet16bf& from) {
- return static_cast<bfloat16>(predux_max<Packet16f>(Bf16ToF32(from)));
-}
-
-template <>
EIGEN_STRONG_INLINE Packet16bf preverse(const Packet16bf& a) {
__m256i m = _mm256_setr_epi8(14, 15, 12, 13, 10, 11, 8, 9, 6, 7, 4, 5, 2, 3, 0, 1, 14, 15, 12, 13, 10, 11, 8, 9, 6, 7,
4, 5, 2, 3, 0, 1);
diff --git a/Eigen/src/Core/arch/AVX512/Reductions.h b/Eigen/src/Core/arch/AVX512/Reductions.h
new file mode 100644
index 0000000..f7b4c25
--- /dev/null
+++ b/Eigen/src/Core/arch/AVX512/Reductions.h
@@ -0,0 +1,297 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2025 Charlie Schlosser <cs.schlosser@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_REDUCTIONS_AVX512_H
+#define EIGEN_REDUCTIONS_AVX512_H
+
+// IWYU pragma: private
+#include "../../InternalHeaderCheck.h"
+
+namespace Eigen {
+
+namespace internal {
+
+/* -- -- -- -- -- -- -- -- -- -- -- -- Packet16i -- -- -- -- -- -- -- -- -- -- -- -- */
+
+template <>
+EIGEN_STRONG_INLINE int predux(const Packet16i& a) {
+ return _mm512_reduce_add_epi32(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE int predux_mul(const Packet16i& a) {
+ return _mm512_reduce_mul_epi32(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE int predux_min(const Packet16i& a) {
+ return _mm512_reduce_min_epi32(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE int predux_max(const Packet16i& a) {
+ return _mm512_reduce_max_epi32(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE bool predux_any(const Packet16i& a) {
+ return _mm512_reduce_or_epi32(a) != 0;
+}
+
+/* -- -- -- -- -- -- -- -- -- -- -- -- Packet8l -- -- -- -- -- -- -- -- -- -- -- -- */
+
+template <>
+EIGEN_STRONG_INLINE int64_t predux(const Packet8l& a) {
+ return _mm512_reduce_add_epi64(a);
+}
+
+#if EIGEN_COMP_MSVC
+// MSVC's _mm512_reduce_mul_epi64 is borked, at least up to and including 1939.
+// alignas(64) int64_t data[] = { 1,1,-1,-1,1,-1,-1,-1 };
+// int64_t out = _mm512_reduce_mul_epi64(_mm512_load_epi64(data));
+// produces garbage: 4294967295. It seems to happen whenever the output is supposed to be negative.
+// Fall back to a manual approach:
+template <>
+EIGEN_STRONG_INLINE int64_t predux_mul(const Packet8l& a) {
+ Packet4l lane0 = _mm512_extracti64x4_epi64(a, 0);
+ Packet4l lane1 = _mm512_extracti64x4_epi64(a, 1);
+ return predux_mul(pmul(lane0, lane1));
+}
+#else
+template <>
+EIGEN_STRONG_INLINE int64_t predux_mul<Packet8l>(const Packet8l& a) {
+ return _mm512_reduce_mul_epi64(a);
+}
+#endif
+
+template <>
+EIGEN_STRONG_INLINE int64_t predux_min(const Packet8l& a) {
+ return _mm512_reduce_min_epi64(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE int64_t predux_max(const Packet8l& a) {
+ return _mm512_reduce_max_epi64(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE bool predux_any(const Packet8l& a) {
+ return _mm512_reduce_or_epi64(a) != 0;
+}
+
+/* -- -- -- -- -- -- -- -- -- -- -- -- Packet16f -- -- -- -- -- -- -- -- -- -- -- -- */
+
+template <>
+EIGEN_STRONG_INLINE float predux(const Packet16f& a) {
+ return _mm512_reduce_add_ps(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE float predux_mul(const Packet16f& a) {
+ return _mm512_reduce_mul_ps(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE float predux_min(const Packet16f& a) {
+ return _mm512_reduce_min_ps(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE float predux_min<PropagateNumbers>(const Packet16f& a) {
+ Packet8f lane0 = _mm512_extractf32x8_ps(a, 0);
+ Packet8f lane1 = _mm512_extractf32x8_ps(a, 1);
+ return predux_min<PropagateNumbers>(pmin<PropagateNumbers>(lane0, lane1));
+}
+
+template <>
+EIGEN_STRONG_INLINE float predux_min<PropagateNaN>(const Packet16f& a) {
+ Packet8f lane0 = _mm512_extractf32x8_ps(a, 0);
+ Packet8f lane1 = _mm512_extractf32x8_ps(a, 1);
+ return predux_min<PropagateNaN>(pmin<PropagateNaN>(lane0, lane1));
+}
+
+template <>
+EIGEN_STRONG_INLINE float predux_max(const Packet16f& a) {
+ return _mm512_reduce_max_ps(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE float predux_max<PropagateNumbers>(const Packet16f& a) {
+ Packet8f lane0 = _mm512_extractf32x8_ps(a, 0);
+ Packet8f lane1 = _mm512_extractf32x8_ps(a, 1);
+ return predux_max<PropagateNumbers>(pmax<PropagateNumbers>(lane0, lane1));
+}
+
+template <>
+EIGEN_STRONG_INLINE float predux_max<PropagateNaN>(const Packet16f& a) {
+ Packet8f lane0 = _mm512_extractf32x8_ps(a, 0);
+ Packet8f lane1 = _mm512_extractf32x8_ps(a, 1);
+ return predux_max<PropagateNaN>(pmax<PropagateNaN>(lane0, lane1));
+}
+
+template <>
+EIGEN_STRONG_INLINE bool predux_any(const Packet16f& a) {
+ return _mm512_reduce_or_epi32(_mm512_castps_si512(a)) != 0;
+}
+
+/* -- -- -- -- -- -- -- -- -- -- -- -- Packet8d -- -- -- -- -- -- -- -- -- -- -- -- */
+
+template <>
+EIGEN_STRONG_INLINE double predux(const Packet8d& a) {
+ return _mm512_reduce_add_pd(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE double predux_mul(const Packet8d& a) {
+ return _mm512_reduce_mul_pd(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE double predux_min(const Packet8d& a) {
+ return _mm512_reduce_min_pd(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE double predux_min<PropagateNumbers>(const Packet8d& a) {
+ Packet4d lane0 = _mm512_extractf64x4_pd(a, 0);
+ Packet4d lane1 = _mm512_extractf64x4_pd(a, 1);
+ return predux_min<PropagateNumbers>(pmin<PropagateNumbers>(lane0, lane1));
+}
+
+template <>
+EIGEN_STRONG_INLINE double predux_min<PropagateNaN>(const Packet8d& a) {
+ Packet4d lane0 = _mm512_extractf64x4_pd(a, 0);
+ Packet4d lane1 = _mm512_extractf64x4_pd(a, 1);
+ return predux_min<PropagateNaN>(pmin<PropagateNaN>(lane0, lane1));
+}
+
+template <>
+EIGEN_STRONG_INLINE double predux_max(const Packet8d& a) {
+ return _mm512_reduce_max_pd(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE double predux_max<PropagateNumbers>(const Packet8d& a) {
+ Packet4d lane0 = _mm512_extractf64x4_pd(a, 0);
+ Packet4d lane1 = _mm512_extractf64x4_pd(a, 1);
+ return predux_max<PropagateNumbers>(pmax<PropagateNumbers>(lane0, lane1));
+}
+
+template <>
+EIGEN_STRONG_INLINE double predux_max<PropagateNaN>(const Packet8d& a) {
+ Packet4d lane0 = _mm512_extractf64x4_pd(a, 0);
+ Packet4d lane1 = _mm512_extractf64x4_pd(a, 1);
+ return predux_max<PropagateNaN>(pmax<PropagateNaN>(lane0, lane1));
+}
+
+template <>
+EIGEN_STRONG_INLINE bool predux_any(const Packet8d& a) {
+ return _mm512_reduce_or_epi64(_mm512_castpd_si512(a)) != 0;
+}
+
+#ifndef EIGEN_VECTORIZE_AVX512FP16
+/* -- -- -- -- -- -- -- -- -- -- -- -- Packet16h -- -- -- -- -- -- -- -- -- -- -- -- */
+
+template <>
+EIGEN_STRONG_INLINE half predux(const Packet16h& from) {
+ return half(predux(half2float(from)));
+}
+
+template <>
+EIGEN_STRONG_INLINE half predux_mul(const Packet16h& from) {
+ return half(predux_mul(half2float(from)));
+}
+
+template <>
+EIGEN_STRONG_INLINE half predux_min(const Packet16h& from) {
+ return half(predux_min(half2float(from)));
+}
+
+template <>
+EIGEN_STRONG_INLINE half predux_min<PropagateNumbers>(const Packet16h& from) {
+ return half(predux_min<PropagateNumbers>(half2float(from)));
+}
+
+template <>
+EIGEN_STRONG_INLINE half predux_min<PropagateNaN>(const Packet16h& from) {
+ return half(predux_min<PropagateNaN>(half2float(from)));
+}
+
+template <>
+EIGEN_STRONG_INLINE half predux_max(const Packet16h& from) {
+ return half(predux_max(half2float(from)));
+}
+
+template <>
+EIGEN_STRONG_INLINE half predux_max<PropagateNumbers>(const Packet16h& from) {
+ return half(predux_max<PropagateNumbers>(half2float(from)));
+}
+
+template <>
+EIGEN_STRONG_INLINE half predux_max<PropagateNaN>(const Packet16h& from) {
+ return half(predux_max<PropagateNaN>(half2float(from)));
+}
+
+template <>
+EIGEN_STRONG_INLINE bool predux_any(const Packet16h& a) {
+ return predux_any<Packet8i>(a.m_val);
+}
+#endif
+
+/* -- -- -- -- -- -- -- -- -- -- -- -- Packet16bf -- -- -- -- -- -- -- -- -- -- -- -- */
+
+template <>
+EIGEN_STRONG_INLINE bfloat16 predux(const Packet16bf& from) {
+ return static_cast<bfloat16>(predux<Packet16f>(Bf16ToF32(from)));
+}
+
+template <>
+EIGEN_STRONG_INLINE bfloat16 predux_mul(const Packet16bf& from) {
+ return static_cast<bfloat16>(predux_mul<Packet16f>(Bf16ToF32(from)));
+}
+
+template <>
+EIGEN_STRONG_INLINE bfloat16 predux_min(const Packet16bf& from) {
+ return static_cast<bfloat16>(predux_min<Packet16f>(Bf16ToF32(from)));
+}
+
+template <>
+EIGEN_STRONG_INLINE bfloat16 predux_min<PropagateNumbers>(const Packet16bf& from) {
+ return static_cast<bfloat16>(predux_min<PropagateNumbers>(Bf16ToF32(from)));
+}
+
+template <>
+EIGEN_STRONG_INLINE bfloat16 predux_min<PropagateNaN>(const Packet16bf& from) {
+ return static_cast<bfloat16>(predux_min<PropagateNaN>(Bf16ToF32(from)));
+}
+
+template <>
+EIGEN_STRONG_INLINE bfloat16 predux_max(const Packet16bf& from) {
+ return static_cast<bfloat16>(predux_max(Bf16ToF32(from)));
+}
+
+template <>
+EIGEN_STRONG_INLINE bfloat16 predux_max<PropagateNumbers>(const Packet16bf& from) {
+ return static_cast<bfloat16>(predux_max<PropagateNumbers>(Bf16ToF32(from)));
+}
+
+template <>
+EIGEN_STRONG_INLINE bfloat16 predux_max<PropagateNaN>(const Packet16bf& from) {
+ return static_cast<bfloat16>(predux_max<PropagateNaN>(Bf16ToF32(from)));
+}
+
+template <>
+EIGEN_STRONG_INLINE bool predux_any(const Packet16bf& a) {
+ return predux_any<Packet8i>(a.m_val);
+}
+
+} // end namespace internal
+} // end namespace Eigen
+
+#endif // EIGEN_REDUCTIONS_AVX512_H
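
On AVX512 the patch mostly replaces hand-rolled shuffle sequences with the _mm512_reduce_* intrinsics, as above. A usage sketch, assuming an AVX512-enabled build:

#include <Eigen/Core>
#include <iostream>

int main() {
  using namespace Eigen::internal;
  Packet16f p = plset<Packet16f>(1.0f);  // {1, 2, ..., 16}
  std::cout << predux(p) << "\n";      // 136, via _mm512_reduce_add_ps
  std::cout << predux_min(p) << "\n";  // 1
  std::cout << predux_max(p) << "\n";  // 16
  return 0;
}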
diff --git a/Eigen/src/Core/arch/SSE/PacketMath.h b/Eigen/src/Core/arch/SSE/PacketMath.h
index 70d13d6..e8902cf 100644
--- a/Eigen/src/Core/arch/SSE/PacketMath.h
+++ b/Eigen/src/Core/arch/SSE/PacketMath.h
@@ -1857,220 +1857,6 @@
vecs[0] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0x00));
}
-template <>
-EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a) {
- // Disable SSE3 _mm_hadd_pd that is extremely slow on all existing Intel's architectures
- // (from Nehalem to Haswell)
- // #ifdef EIGEN_VECTORIZE_SSE3
- // Packet4f tmp = _mm_add_ps(a, vec4f_swizzle1(a,2,3,2,3));
- // return pfirst<Packet4f>(_mm_hadd_ps(tmp, tmp));
- // #else
- Packet4f tmp = _mm_add_ps(a, _mm_movehl_ps(a, a));
- return pfirst<Packet4f>(_mm_add_ss(tmp, _mm_shuffle_ps(tmp, tmp, 1)));
- // #endif
-}
-
-template <>
-EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a) {
- // Disable SSE3 _mm_hadd_pd that is extremely slow on all existing Intel's architectures
- // (from Nehalem to Haswell)
- // #ifdef EIGEN_VECTORIZE_SSE3
- // return pfirst<Packet2d>(_mm_hadd_pd(a, a));
- // #else
- return pfirst<Packet2d>(_mm_add_sd(a, _mm_unpackhi_pd(a, a)));
- // #endif
-}
-
-template <>
-EIGEN_STRONG_INLINE int64_t predux<Packet2l>(const Packet2l& a) {
- return pfirst<Packet2l>(_mm_add_epi64(a, _mm_unpackhi_epi64(a, a)));
-}
-
-#ifdef EIGEN_VECTORIZE_SSSE3
-template <>
-EIGEN_STRONG_INLINE int predux<Packet4i>(const Packet4i& a) {
- Packet4i tmp0 = _mm_hadd_epi32(a, a);
- return pfirst<Packet4i>(_mm_hadd_epi32(tmp0, tmp0));
-}
-template <>
-EIGEN_STRONG_INLINE uint32_t predux<Packet4ui>(const Packet4ui& a) {
- Packet4ui tmp0 = _mm_hadd_epi32(a, a);
- return pfirst<Packet4ui>(_mm_hadd_epi32(tmp0, tmp0));
-}
-#else
-template <>
-EIGEN_STRONG_INLINE int predux<Packet4i>(const Packet4i& a) {
- Packet4i tmp = _mm_add_epi32(a, _mm_unpackhi_epi64(a, a));
- return pfirst(tmp) + pfirst<Packet4i>(_mm_shuffle_epi32(tmp, 1));
-}
-template <>
-EIGEN_STRONG_INLINE uint32_t predux<Packet4ui>(const Packet4ui& a) {
- Packet4ui tmp = _mm_add_epi32(a, _mm_unpackhi_epi64(a, a));
- return pfirst(tmp) + pfirst<Packet4ui>(_mm_shuffle_epi32(tmp, 1));
-}
-#endif
-
-template <>
-EIGEN_STRONG_INLINE bool predux<Packet16b>(const Packet16b& a) {
- Packet4i tmp = _mm_or_si128(a, _mm_unpackhi_epi64(a, a));
- return (pfirst(tmp) != 0) || (pfirst<Packet4i>(_mm_shuffle_epi32(tmp, 1)) != 0);
-}
-
-// Other reduction functions:
-
-// mul
-template <>
-EIGEN_STRONG_INLINE float predux_mul<Packet4f>(const Packet4f& a) {
- Packet4f tmp = _mm_mul_ps(a, _mm_movehl_ps(a, a));
- return pfirst<Packet4f>(_mm_mul_ss(tmp, _mm_shuffle_ps(tmp, tmp, 1)));
-}
-template <>
-EIGEN_STRONG_INLINE double predux_mul<Packet2d>(const Packet2d& a) {
- return pfirst<Packet2d>(_mm_mul_sd(a, _mm_unpackhi_pd(a, a)));
-}
-template <>
-EIGEN_STRONG_INLINE int64_t predux_mul<Packet2l>(const Packet2l& a) {
- EIGEN_ALIGN16 int64_t aux[2];
- pstore(aux, a);
- return aux[0] * aux[1];
-}
-template <>
-EIGEN_STRONG_INLINE int predux_mul<Packet4i>(const Packet4i& a) {
- // after some experiments, it is seems this is the fastest way to implement it
- // for GCC (e.g., reusing pmul is very slow!)
- // TODO try to call _mm_mul_epu32 directly
- EIGEN_ALIGN16 int aux[4];
- pstore(aux, a);
- return (aux[0] * aux[1]) * (aux[2] * aux[3]);
-}
-template <>
-EIGEN_STRONG_INLINE uint32_t predux_mul<Packet4ui>(const Packet4ui& a) {
- // after some experiments, it is seems this is the fastest way to implement it
- // for GCC (eg., reusing pmul is very slow !)
- // TODO try to call _mm_mul_epu32 directly
- EIGEN_ALIGN16 uint32_t aux[4];
- pstore(aux, a);
- return (aux[0] * aux[1]) * (aux[2] * aux[3]);
-}
-
-template <>
-EIGEN_STRONG_INLINE bool predux_mul<Packet16b>(const Packet16b& a) {
- Packet4i tmp = _mm_and_si128(a, _mm_unpackhi_epi64(a, a));
- return ((pfirst<Packet4i>(tmp) == 0x01010101) && (pfirst<Packet4i>(_mm_shuffle_epi32(tmp, 1)) == 0x01010101));
-}
-
-// min
-template <>
-EIGEN_STRONG_INLINE float predux_min<Packet4f>(const Packet4f& a) {
- Packet4f tmp = _mm_min_ps(a, _mm_movehl_ps(a, a));
- return pfirst<Packet4f>(_mm_min_ss(tmp, _mm_shuffle_ps(tmp, tmp, 1)));
-}
-template <>
-EIGEN_STRONG_INLINE double predux_min<Packet2d>(const Packet2d& a) {
- return pfirst<Packet2d>(_mm_min_sd(a, _mm_unpackhi_pd(a, a)));
-}
-template <>
-EIGEN_STRONG_INLINE int predux_min<Packet4i>(const Packet4i& a) {
-#ifdef EIGEN_VECTORIZE_SSE4_1
- Packet4i tmp = _mm_min_epi32(a, _mm_shuffle_epi32(a, _MM_SHUFFLE(0, 0, 3, 2)));
- return pfirst<Packet4i>(_mm_min_epi32(tmp, _mm_shuffle_epi32(tmp, 1)));
-#else
- // after some experiments, it is seems this is the fastest way to implement it
- // for GCC (eg., it does not like using std::min after the pstore !!)
- EIGEN_ALIGN16 int aux[4];
- pstore(aux, a);
- int aux0 = aux[0] < aux[1] ? aux[0] : aux[1];
- int aux2 = aux[2] < aux[3] ? aux[2] : aux[3];
- return aux0 < aux2 ? aux0 : aux2;
-#endif // EIGEN_VECTORIZE_SSE4_1
-}
-template <>
-EIGEN_STRONG_INLINE uint32_t predux_min<Packet4ui>(const Packet4ui& a) {
-#ifdef EIGEN_VECTORIZE_SSE4_1
- Packet4ui tmp = _mm_min_epu32(a, _mm_shuffle_epi32(a, _MM_SHUFFLE(0, 0, 3, 2)));
- return pfirst<Packet4ui>(_mm_min_epu32(tmp, _mm_shuffle_epi32(tmp, 1)));
-#else
- // after some experiments, it is seems this is the fastest way to implement it
- // for GCC (eg., it does not like using std::min after the pstore !!)
- EIGEN_ALIGN16 uint32_t aux[4];
- pstore(aux, a);
- uint32_t aux0 = aux[0] < aux[1] ? aux[0] : aux[1];
- uint32_t aux2 = aux[2] < aux[3] ? aux[2] : aux[3];
- return aux0 < aux2 ? aux0 : aux2;
-#endif // EIGEN_VECTORIZE_SSE4_1
-}
-
-// max
-template <>
-EIGEN_STRONG_INLINE float predux_max<Packet4f>(const Packet4f& a) {
- Packet4f tmp = _mm_max_ps(a, _mm_movehl_ps(a, a));
- return pfirst<Packet4f>(_mm_max_ss(tmp, _mm_shuffle_ps(tmp, tmp, 1)));
-}
-template <>
-EIGEN_STRONG_INLINE double predux_max<Packet2d>(const Packet2d& a) {
- return pfirst<Packet2d>(_mm_max_sd(a, _mm_unpackhi_pd(a, a)));
-}
-template <>
-EIGEN_STRONG_INLINE int predux_max<Packet4i>(const Packet4i& a) {
-#ifdef EIGEN_VECTORIZE_SSE4_1
- Packet4i tmp = _mm_max_epi32(a, _mm_shuffle_epi32(a, _MM_SHUFFLE(0, 0, 3, 2)));
- return pfirst<Packet4i>(_mm_max_epi32(tmp, _mm_shuffle_epi32(tmp, 1)));
-#else
- // after some experiments, it is seems this is the fastest way to implement it
- // for GCC (eg., it does not like using std::min after the pstore !!)
- EIGEN_ALIGN16 int aux[4];
- pstore(aux, a);
- int aux0 = aux[0] > aux[1] ? aux[0] : aux[1];
- int aux2 = aux[2] > aux[3] ? aux[2] : aux[3];
- return aux0 > aux2 ? aux0 : aux2;
-#endif // EIGEN_VECTORIZE_SSE4_1
-}
-template <>
-EIGEN_STRONG_INLINE uint32_t predux_max<Packet4ui>(const Packet4ui& a) {
-#ifdef EIGEN_VECTORIZE_SSE4_1
- Packet4ui tmp = _mm_max_epu32(a, _mm_shuffle_epi32(a, _MM_SHUFFLE(0, 0, 3, 2)));
- return pfirst<Packet4ui>(_mm_max_epu32(tmp, _mm_shuffle_epi32(tmp, 1)));
-#else
- // after some experiments, it is seems this is the fastest way to implement it
- // for GCC (eg., it does not like using std::min after the pstore !!)
- EIGEN_ALIGN16 uint32_t aux[4];
- pstore(aux, a);
- uint32_t aux0 = aux[0] > aux[1] ? aux[0] : aux[1];
- uint32_t aux2 = aux[2] > aux[3] ? aux[2] : aux[3];
- return aux0 > aux2 ? aux0 : aux2;
-#endif // EIGEN_VECTORIZE_SSE4_1
-}
-
-// not needed yet
-// template<> EIGEN_STRONG_INLINE bool predux_all(const Packet4f& x)
-// {
-// return _mm_movemask_ps(x) == 0xF;
-// }
-
-template <>
-EIGEN_STRONG_INLINE bool predux_any(const Packet2d& x) {
- return _mm_movemask_pd(x) != 0x0;
-}
-
-template <>
-EIGEN_STRONG_INLINE bool predux_any(const Packet4f& x) {
- return _mm_movemask_ps(x) != 0x0;
-}
-
-template <>
-EIGEN_STRONG_INLINE bool predux_any(const Packet2l& x) {
- return _mm_movemask_pd(_mm_castsi128_pd(x)) != 0x0;
-}
-
-template <>
-EIGEN_STRONG_INLINE bool predux_any(const Packet4i& x) {
- return _mm_movemask_ps(_mm_castsi128_ps(x)) != 0x0;
-}
-template <>
-EIGEN_STRONG_INLINE bool predux_any(const Packet4ui& x) {
- return _mm_movemask_ps(_mm_castsi128_ps(x)) != 0x0;
-}
-
EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet4f, 4>& kernel) {
_MM_TRANSPOSE4_PS(kernel.packet[0], kernel.packet[1], kernel.packet[2], kernel.packet[3]);
}
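
The new SSE/Reductions.h below replaces the per-reduction bodies deleted above with one shuffle-reduction skeleton parameterized by a binary-op wrapper. A standalone sketch of that pattern, with hypothetical names (AddOp, MaxOp, and reduce4 are not Eigen's):

#include <immintrin.h>

struct AddOp {
  static __m128 apply(__m128 a, __m128 b) { return _mm_add_ps(a, b); }
};
struct MaxOp {
  static __m128 apply(__m128 a, __m128 b) { return _mm_max_ps(a, b); }
};

template <typename Op>
float reduce4(__m128 a) {
  __m128 t = Op::apply(a, _mm_movehl_ps(a, a));  // fold upper half into lower
  t = Op::apply(t, _mm_shuffle_ps(t, t, 1));     // fold the remaining lane
  return _mm_cvtss_f32(t);
}

// reduce4<AddOp>(v) sums the four lanes; reduce4<MaxOp>(v) takes their max.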
diff --git a/Eigen/src/Core/arch/SSE/Reductions.h b/Eigen/src/Core/arch/SSE/Reductions.h
new file mode 100644
index 0000000..f38df4e
--- /dev/null
+++ b/Eigen/src/Core/arch/SSE/Reductions.h
@@ -0,0 +1,324 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2025 Charlie Schlosser <cs.schlosser@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_REDUCTIONS_SSE_H
+#define EIGEN_REDUCTIONS_SSE_H
+
+// IWYU pragma: private
+#include "../../InternalHeaderCheck.h"
+
+namespace Eigen {
+
+namespace internal {
+
+template <typename Packet>
+struct sse_add_wrapper {
+ static EIGEN_STRONG_INLINE Packet packetOp(const Packet& a, const Packet& b) { return padd<Packet>(a, b); }
+};
+
+template <typename Packet>
+struct sse_mul_wrapper {
+ static EIGEN_STRONG_INLINE Packet packetOp(const Packet& a, const Packet& b) { return pmul<Packet>(a, b); }
+};
+
+template <typename Packet>
+struct sse_min_wrapper {
+ static EIGEN_STRONG_INLINE Packet packetOp(const Packet& a, const Packet& b) { return pmin<Packet>(a, b); }
+};
+
+template <int NaNPropagation, typename Packet>
+struct sse_min_prop_wrapper {
+ static EIGEN_STRONG_INLINE Packet packetOp(const Packet& a, const Packet& b) {
+ return pmin<NaNPropagation, Packet>(a, b);
+ }
+};
+
+template <typename Packet>
+struct sse_max_wrapper {
+ static EIGEN_STRONG_INLINE Packet packetOp(const Packet& a, const Packet& b) { return pmax<Packet>(a, b); }
+};
+
+template <int NaNPropagation, typename Packet>
+struct sse_max_prop_wrapper {
+ static EIGEN_STRONG_INLINE Packet packetOp(const Packet& a, const Packet& b) {
+ return pmax<NaNPropagation, Packet>(a, b);
+ }
+};
+
+template <typename Packet, typename Op>
+struct sse_predux_common;
+
+template <typename Packet>
+struct sse_predux_impl : sse_predux_common<Packet, sse_add_wrapper<Packet>> {};
+
+template <typename Packet>
+struct sse_predux_mul_impl : sse_predux_common<Packet, sse_mul_wrapper<Packet>> {};
+
+template <typename Packet>
+struct sse_predux_min_impl : sse_predux_common<Packet, sse_min_wrapper<Packet>> {};
+
+template <int NaNPropagation, typename Packet>
+struct sse_predux_min_prop_impl : sse_predux_common<Packet, sse_min_prop_wrapper<NaNPropagation, Packet>> {};
+
+template <typename Packet>
+struct sse_predux_max_impl : sse_predux_common<Packet, sse_max_wrapper<Packet>> {};
+
+template <int NaNPropagation, typename Packet>
+struct sse_predux_max_prop_impl : sse_predux_common<Packet, sse_max_prop_wrapper<NaNPropagation, Packet>> {};
+
+/* -- -- -- -- -- -- -- -- -- -- -- -- Packet16b -- -- -- -- -- -- -- -- -- -- -- -- */
+
+template <>
+EIGEN_STRONG_INLINE bool predux(const Packet16b& a) {
+ Packet4i tmp = _mm_or_si128(a, _mm_unpackhi_epi64(a, a));
+ return (pfirst(tmp) != 0) || (pfirst<Packet4i>(_mm_shuffle_epi32(tmp, 1)) != 0);
+}
+
+template <>
+EIGEN_STRONG_INLINE bool predux_mul(const Packet16b& a) {
+ Packet4i tmp = _mm_and_si128(a, _mm_unpackhi_epi64(a, a));
+ return ((pfirst<Packet4i>(tmp) == 0x01010101) && (pfirst<Packet4i>(_mm_shuffle_epi32(tmp, 1)) == 0x01010101));
+}
+
+template <>
+EIGEN_STRONG_INLINE bool predux_min(const Packet16b& a) {
+ return predux_mul(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE bool predux_max(const Packet16b& a) {
+ return predux(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE bool predux_any(const Packet16b& a) {
+ return predux(a);
+}
+
+/* -- -- -- -- -- -- -- -- -- -- -- -- Packet4i -- -- -- -- -- -- -- -- -- -- -- -- */
+
+template <typename Op>
+struct sse_predux_common<Packet4i, Op> {
+ static EIGEN_STRONG_INLINE int run(const Packet4i& a) {
+ Packet4i tmp;
+ tmp = Op::packetOp(a, _mm_shuffle_epi32(a, _MM_SHUFFLE(0, 1, 2, 3)));
+ tmp = Op::packetOp(tmp, _mm_unpackhi_epi32(tmp, tmp));
+ return _mm_cvtsi128_si32(tmp);
+ }
+};
+
+template <>
+EIGEN_STRONG_INLINE int predux(const Packet4i& a) {
+ return sse_predux_impl<Packet4i>::run(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE int predux_mul(const Packet4i& a) {
+ return sse_predux_mul_impl<Packet4i>::run(a);
+}
+
+#ifdef EIGEN_VECTORIZE_SSE4_1
+template <>
+EIGEN_STRONG_INLINE int predux_min(const Packet4i& a) {
+ return sse_predux_min_impl<Packet4i>::run(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE int predux_max(const Packet4i& a) {
+ return sse_predux_max_impl<Packet4i>::run(a);
+}
+#endif
+
+template <>
+EIGEN_STRONG_INLINE bool predux_any(const Packet4i& a) {
+ return _mm_movemask_ps(_mm_castsi128_ps(a)) != 0x0;
+}
+
+/* -- -- -- -- -- -- -- -- -- -- -- -- Packet4ui -- -- -- -- -- -- -- -- -- -- -- -- */
+
+template <typename Op>
+struct sse_predux_common<Packet4ui, Op> {
+ static EIGEN_STRONG_INLINE uint32_t run(const Packet4ui& a) {
+ Packet4ui tmp;
+ tmp = Op::packetOp(a, _mm_shuffle_epi32(a, _MM_SHUFFLE(0, 1, 2, 3)));
+ tmp = Op::packetOp(tmp, _mm_unpackhi_epi32(tmp, tmp));
+ return static_cast<uint32_t>(_mm_cvtsi128_si32(tmp));
+ }
+};
+
+template <>
+EIGEN_STRONG_INLINE uint32_t predux(const Packet4ui& a) {
+ return sse_predux_impl<Packet4ui>::run(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE uint32_t predux_mul(const Packet4ui& a) {
+ return sse_predux_mul_impl<Packet4ui>::run(a);
+}
+
+#ifdef EIGEN_VECTORIZE_SSE4_1
+template <>
+EIGEN_STRONG_INLINE uint32_t predux_min(const Packet4ui& a) {
+ return sse_predux_min_impl<Packet4ui>::run(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE uint32_t predux_max(const Packet4ui& a) {
+ return sse_predux_max_impl<Packet4ui>::run(a);
+}
+#endif
+
+template <>
+EIGEN_STRONG_INLINE bool predux_any(const Packet4ui& a) {
+ return _mm_movemask_ps(_mm_castsi128_ps(a)) != 0x0;
+}
+
+/* -- -- -- -- -- -- -- -- -- -- -- -- Packet2l -- -- -- -- -- -- -- -- -- -- -- -- */
+
+template <typename Op>
+struct sse_predux_common<Packet2l, Op> {
+ static EIGEN_STRONG_INLINE int64_t run(const Packet2l& a) {
+ Packet2l tmp;
+ tmp = Op::packetOp(a, _mm_unpackhi_epi64(a, a));
+ return pfirst(tmp);
+ }
+};
+
+template <>
+EIGEN_STRONG_INLINE int64_t predux(const Packet2l& a) {
+ return sse_predux_impl<Packet2l>::run(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE bool predux_any(const Packet2l& a) {
+ return _mm_movemask_pd(_mm_castsi128_pd(a)) != 0x0;
+}
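+
+// Only predux and predux_any are specialized for Packet2l: SSE offers no
+// 64-bit integer min/max or multiply, so the remaining reductions keep the
+// generic default behavior.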
+
+/* -- -- -- -- -- -- -- -- -- -- -- -- Packet4f -- -- -- -- -- -- -- -- -- -- -- -- */
+
+template <typename Op>
+struct sse_predux_common<Packet4f, Op> {
+ static EIGEN_STRONG_INLINE float run(const Packet4f& a) {
+ Packet4f tmp;
+ // combine the low half with the high half moved down: tmp[0] = op(a0, a2), tmp[1] = op(a1, a3)
+ tmp = Op::packetOp(a, _mm_movehl_ps(a, a));
+#ifdef EIGEN_VECTORIZE_SSE3
+ // SSE3: _mm_movehdup_ps places lane 1 in lane 0, finishing the reduction in one op
+ tmp = Op::packetOp(tmp, _mm_movehdup_ps(tmp));
+#else
+ // SSE2 fallback: shuffle lane 1 into lane 0
+ tmp = Op::packetOp(tmp, _mm_shuffle_ps(tmp, tmp, 1));
+#endif
+ return _mm_cvtss_f32(tmp);
+ }
+};
+
+template <>
+EIGEN_STRONG_INLINE float predux(const Packet4f& a) {
+ return sse_predux_impl<Packet4f>::run(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE float predux_mul(const Packet4f& a) {
+ return sse_predux_mul_impl<Packet4f>::run(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE float predux_min(const Packet4f& a) {
+ return sse_predux_min_impl<Packet4f>::run(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE float predux_min<PropagateNumbers>(const Packet4f& a) {
+ return sse_predux_min_prop_impl<PropagateNumbers, Packet4f>::run(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE float predux_min<PropagateNaN>(const Packet4f& a) {
+ return sse_predux_min_prop_impl<PropagateNaN, Packet4f>::run(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE float predux_max(const Packet4f& a) {
+ return sse_predux_max_impl<Packet4f>::run(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE float predux_max<PropagateNumbers>(const Packet4f& a) {
+ return sse_predux_max_prop_impl<PropagateNumbers, Packet4f>::run(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE float predux_max<PropagateNaN>(const Packet4f& a) {
+ return sse_predux_max_prop_impl<PropagateNaN, Packet4f>::run(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE bool predux_any(const Packet4f& a) {
+ return _mm_movemask_ps(a) != 0x0;
+}
+
+/* -- -- -- -- -- -- -- -- -- -- -- -- Packet2d -- -- -- -- -- -- -- -- -- -- -- -- */
+
+template <typename Op>
+struct sse_predux_common<Packet2d, Op> {
+ static EIGEN_STRONG_INLINE double run(const Packet2d& a) {
+ Packet2d tmp;
+ tmp = Op::packetOp(a, _mm_unpackhi_pd(a, a));
+ return _mm_cvtsd_f64(tmp);
+ }
+};
+
+template <>
+EIGEN_STRONG_INLINE double predux(const Packet2d& a) {
+ return sse_predux_impl<Packet2d>::run(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE double predux_mul(const Packet2d& a) {
+ return sse_predux_mul_impl<Packet2d>::run(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE double predux_min(const Packet2d& a) {
+ return sse_predux_min_impl<Packet2d>::run(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE double predux_min<PropagateNumbers>(const Packet2d& a) {
+ return sse_predux_min_prop_impl<PropagateNumbers, Packet2d>::run(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE double predux_min<PropagateNaN>(const Packet2d& a) {
+ return sse_predux_min_prop_impl<PropagateNaN, Packet2d>::run(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE double predux_max(const Packet2d& a) {
+ return sse_predux_max_impl<Packet2d>::run(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE double predux_max<PropagateNumbers>(const Packet2d& a) {
+ return sse_predux_max_prop_impl<PropagateNumbers, Packet2d>::run(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE double predux_max<PropagateNaN>(const Packet2d& a) {
+ return sse_predux_max_prop_impl<PropagateNaN, Packet2d>::run(a);
+}
+
+template <>
+EIGEN_STRONG_INLINE bool predux_any(const Packet2d& a) {
+ return _mm_movemask_pd(a) != 0x0;
+}
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_REDUCTIONS_SSE_H
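
The kernels above are reached through the generic internal::predux entry
points. A minimal sketch exercising the Packet4f path (assuming an SSE-enabled
x86 build; these are internal, unstable APIs, shown for illustration only):

    #include <Eigen/Core>
    using namespace Eigen;

    int main() {
      const float data[4] = {1.0f, 2.0f, 3.0f, 4.0f};
      internal::Packet4f p = internal::ploadu<internal::Packet4f>(data);
      float s  = internal::predux(p);      // sum: 10
      float mn = internal::predux_min(p);  // min: 1
      float mx = internal::predux_max(p);  // max: 4
      return (s == 10.0f && mn == 1.0f && mx == 4.0f) ? 0 : 1;
    }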
diff --git a/Eigen/src/Eigenvalues/Tridiagonalization.h b/Eigen/src/Eigenvalues/Tridiagonalization.h
index 4da6d07..9cc9201 100644
--- a/Eigen/src/Eigenvalues/Tridiagonalization.h
+++ b/Eigen/src/Eigenvalues/Tridiagonalization.h
@@ -345,7 +345,7 @@
// Apply similarity transformation to remaining columns,
// i.e., A = H A H' where H = I - h v v' and v = matA.col(i).tail(n-i-1)
- matA.col(i).coeffRef(i + 1) = 1;
+ matA.col(i).coeffRef(i + 1) = Scalar(1);
hCoeffs.tail(n - i - 1).noalias() =
(matA.bottomRightCorner(remainingSize, remainingSize).template selfadjointView<Lower>() *
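
The switch to an explicit Scalar(1) matters for scalar types that are not
implicitly convertible from int. A hypothetical sketch of such a type, for
illustration:

    struct MyScalar {                      // hypothetical custom scalar
      double v;
      explicit MyScalar(int i) : v(i) {}   // conversion from int is explicit
    };
    // With Scalar = MyScalar:
    //   matA.col(i).coeffRef(i + 1) = 1;          // ill-formed: no implicit conversion
    //   matA.col(i).coeffRef(i + 1) = Scalar(1);  // OK: explicit construction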
diff --git a/Eigen/src/Geometry/Quaternion.h b/Eigen/src/Geometry/Quaternion.h
index 147e6e3..f2d2d05 100644
--- a/Eigen/src/Geometry/Quaternion.h
+++ b/Eigen/src/Geometry/Quaternion.h
@@ -85,6 +85,29 @@
return derived().coeffs();
}
+ /** \returns a vector containing the coefficients, rearranged into the order [\c w, \c x, \c y, \c z].
+ *
+ * This is the order expected by the \code Quaternion(const Scalar& w, const Scalar& x, const Scalar& y, const Scalar&
+ * z) \endcode constructor, but not the order of the internal vector representation. Therefore, it returns a newly
+ * constructed vector.
+ *
+ * \sa QuaternionBase::coeffsScalarLast()
+ */
+ EIGEN_DEVICE_FUNC inline typename internal::traits<Derived>::Coefficients coeffsScalarFirst() const {
+ return derived().coeffsScalarFirst();
+ }
+
+ /** \returns a vector containing the coefficients in their original order [\c x, \c y, \c z, \c w].
+ *
+ * This is equivalent to \code coeffs() \endcode, but returns a newly constructed vector for uniformity with \code
+ * coeffsScalarFirst() \endcode.
+ *
+ * \sa QuaternionBase::coeffsScalarFirst()
+ */
+ EIGEN_DEVICE_FUNC inline typename internal::traits<Derived>::Coefficients coeffsScalarLast() const {
+ return derived().coeffsScalarLast();
+ }
+
/** \returns a vector expression of the coefficients (x,y,z,w) */
EIGEN_DEVICE_FUNC inline typename internal::traits<Derived>::Coefficients& coeffs() { return derived().coeffs(); }
@@ -357,12 +380,23 @@
EIGEN_DEVICE_FUNC static Quaternion UnitRandom();
+ EIGEN_DEVICE_FUNC static Quaternion FromCoeffsScalarLast(const Scalar& x, const Scalar& y, const Scalar& z,
+ const Scalar& w);
+
+ EIGEN_DEVICE_FUNC static Quaternion FromCoeffsScalarFirst(const Scalar& w, const Scalar& x, const Scalar& y,
+ const Scalar& z);
+
template <typename Derived1, typename Derived2>
EIGEN_DEVICE_FUNC static Quaternion FromTwoVectors(const MatrixBase<Derived1>& a, const MatrixBase<Derived2>& b);
EIGEN_DEVICE_FUNC inline Coefficients& coeffs() { return m_coeffs; }
EIGEN_DEVICE_FUNC inline const Coefficients& coeffs() const { return m_coeffs; }
+ EIGEN_DEVICE_FUNC inline Coefficients coeffsScalarLast() const { return m_coeffs; }
+
+ EIGEN_DEVICE_FUNC inline Coefficients coeffsScalarFirst() const {
+ return {m_coeffs.w(), m_coeffs.x(), m_coeffs.y(), m_coeffs.z()};
+ }
EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(bool(NeedsAlignment))
#ifdef EIGEN_QUATERNION_PLUGIN
@@ -437,6 +471,12 @@
EIGEN_DEVICE_FUNC inline const Coefficients& coeffs() const { return m_coeffs; }
+ EIGEN_DEVICE_FUNC inline Coefficients coeffsScalarLast() const { return m_coeffs; }
+
+ EIGEN_DEVICE_FUNC inline Coefficients coeffsScalarFirst() const {
+ return {m_coeffs.w(), m_coeffs.x(), m_coeffs.y(), m_coeffs.z()};
+ }
+
protected:
const Coefficients m_coeffs;
};
@@ -473,6 +513,12 @@
EIGEN_DEVICE_FUNC inline Coefficients& coeffs() { return m_coeffs; }
EIGEN_DEVICE_FUNC inline const Coefficients& coeffs() const { return m_coeffs; }
+ EIGEN_DEVICE_FUNC inline Coefficients coeffsScalarLast() const { return m_coeffs; }
+
+ EIGEN_DEVICE_FUNC inline Coefficients coeffsScalarFirst() const {
+ return {m_coeffs.w(), m_coeffs.x(), m_coeffs.y(), m_coeffs.z()};
+ }
+
protected:
Coefficients m_coeffs;
};
@@ -694,6 +740,35 @@
return Quaternion(a * sin(u2), a * cos(u2), b * sin(u3), b * cos(u3));
}
+/** Constructs a quaternion from its coefficients in the order [\c x, \c y, \c z, \c w], i.e. vector part [\c x, \c y,
+ * \c z] first, scalar part \a w LAST.
+ *
+ * This factory accepts the parameters in the same order as the underlying coefficient vector. Consider using this
+ * factory function to make the parameter ordering explicit.
+ */
+template <typename Scalar, int Options>
+EIGEN_DEVICE_FUNC Quaternion<Scalar, Options> Quaternion<Scalar, Options>::FromCoeffsScalarLast(const Scalar& x,
+ const Scalar& y,
+ const Scalar& z,
+ const Scalar& w) {
+ return Quaternion(w, x, y, z);
+}
+
+/** Constructs a quaternion from its coefficients in the order [\c w, \c x, \c y, \c z], i.e. scalar part \a w FIRST,
+ * vector part [\c x, \c y, \c z] last.
+ *
+ * This factory accepts the parameters in the same order as the constructor \code Quaternion(const Scalar& w, const
+ * Scalar& x, const Scalar& y, const Scalar& z) \endcode. Consider using this factory function to make the parameter
+ * ordering explicit.
+ */
+template <typename Scalar, int Options>
+EIGEN_DEVICE_FUNC Quaternion<Scalar, Options> Quaternion<Scalar, Options>::FromCoeffsScalarFirst(const Scalar& w,
+ const Scalar& x,
+ const Scalar& y,
+ const Scalar& z) {
+ return Quaternion(w, x, y, z);
+}
+
/** Returns a quaternion representing a rotation between
* the two arbitrary vectors \a a and \a b. In other words, the built
* rotation represents a rotation sending the line of direction \a a
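
Taken together, the new factories and getters make both coefficient orderings
explicit at the call site. A minimal usage sketch:

    #include <Eigen/Geometry>
    using namespace Eigen;

    int main() {
      // Both factories construct the same quaternion.
      Quaternionf q1 = Quaternionf::FromCoeffsScalarFirst(1.0f, 0.0f, 0.0f, 0.0f);
      Quaternionf q2 = Quaternionf::FromCoeffsScalarLast(0.0f, 0.0f, 0.0f, 1.0f);
      // coeffs() keeps the internal [x, y, z, w] order; the new getters return
      // freshly constructed vectors in the requested order.
      Vector4f wxyz = q1.coeffsScalarFirst();  // [w, x, y, z]
      Vector4f xyzw = q1.coeffsScalarLast();   // [x, y, z, w], same as q1.coeffs()
      return (q1.isApprox(q2) && wxyz[0] == 1.0f && xyzw[3] == 1.0f) ? 0 : 1;
    }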
diff --git a/test/geo_quaternion.cpp b/test/geo_quaternion.cpp
index b7e4dc5..159a937 100644
--- a/test/geo_quaternion.cpp
+++ b/test/geo_quaternion.cpp
@@ -78,6 +78,19 @@
VERIFY(ss.str() == "0i + 0j + 0k + 1");
#endif
+ // Consistent handling of scalar first/last conventions regardless of Eigen's own coefficient layout
+ const Scalar w(a);
+ const Vector3 xyz(v0);
+ q1 = Quaternionx::FromCoeffsScalarFirst(w, xyz.x(), xyz.y(), xyz.z());
+ q2 = Quaternionx::FromCoeffsScalarLast(xyz.x(), xyz.y(), xyz.z(), w);
+ VERIFY_IS_EQUAL(q1, q2);
+
+ VERIFY_IS_EQUAL(q1.coeffsScalarFirst()[0], w);
+ VERIFY_IS_EQUAL(q1.coeffsScalarFirst()(seqN(1, 3)), xyz);
+
+ VERIFY_IS_EQUAL(q1.coeffsScalarLast()[3], w);
+ VERIFY_IS_EQUAL(q1.coeffsScalarLast()(seqN(0, 3)), xyz);
+
// concatenation
q1 *= q2;
diff --git a/test/redux.cpp b/test/redux.cpp
index 42c269a..c9c3978 100644
--- a/test/redux.cpp
+++ b/test/redux.cpp
@@ -37,12 +37,9 @@
m2.array() = m2.array() - kMaxVal * (m2.array() / kMaxVal);
}
- VERIFY_IS_MUCH_SMALLER_THAN(MatrixType::Zero(rows, cols).sum(), Scalar(1));
- VERIFY_IS_APPROX(
- MatrixType::Ones(rows, cols).sum(),
- Scalar(float(
- rows *
- cols))); // the float() here to shut up excessive MSVC warning about int->complex conversion being lossy
+ VERIFY_IS_EQUAL(MatrixType::Zero(rows, cols).sum(), Scalar(0));
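+ // Convert the element count through internal::cast so it lands cleanly in
+ // integer and complex Scalar types alike.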
+ Scalar sizeAsScalar = internal::cast<Index, Scalar>(rows * cols);
+ VERIFY_IS_APPROX(MatrixType::Ones(rows, cols).sum(), sizeAsScalar);
Scalar s(0), p(1), minc(numext::real(m1.coeff(0))), maxc(numext::real(m1.coeff(0)));
for (int j = 0; j < cols; j++)
for (int i = 0; i < rows; i++) {
@@ -160,6 +157,10 @@
int maxsize = (std::min)(100, EIGEN_TEST_MAX_SIZE);
TEST_SET_BUT_UNUSED_VARIABLE(maxsize);
for (int i = 0; i < g_repeat; i++) {
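+ // Shared random dimensions for the dynamic-size subtests below; flagged
+ // unused because a build may compile only the fixed-size subtests.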
+ int rows = internal::random<int>(1, maxsize);
+ int cols = internal::random<int>(1, maxsize);
+ EIGEN_UNUSED_VARIABLE(rows);
+ EIGEN_UNUSED_VARIABLE(cols);
CALL_SUBTEST_1(matrixRedux(Matrix<float, 1, 1>()));
CALL_SUBTEST_1(matrixRedux(Array<float, 1, 1>()));
CALL_SUBTEST_2(matrixRedux(Matrix2f()));
@@ -168,19 +169,37 @@
CALL_SUBTEST_3(matrixRedux(Matrix4d()));
CALL_SUBTEST_3(matrixRedux(Array4d()));
CALL_SUBTEST_3(matrixRedux(Array44d()));
- CALL_SUBTEST_4(matrixRedux(MatrixXcf(internal::random<int>(1, maxsize), internal::random<int>(1, maxsize))));
- CALL_SUBTEST_4(matrixRedux(ArrayXXcf(internal::random<int>(1, maxsize), internal::random<int>(1, maxsize))));
- CALL_SUBTEST_5(matrixRedux(MatrixXd(internal::random<int>(1, maxsize), internal::random<int>(1, maxsize))));
- CALL_SUBTEST_5(matrixRedux(ArrayXXd(internal::random<int>(1, maxsize), internal::random<int>(1, maxsize))));
- CALL_SUBTEST_6(matrixRedux(MatrixXi(internal::random<int>(1, maxsize), internal::random<int>(1, maxsize))));
- CALL_SUBTEST_6(matrixRedux(ArrayXXi(internal::random<int>(1, maxsize), internal::random<int>(1, maxsize))));
+ CALL_SUBTEST_4(matrixRedux(MatrixXf(rows, cols)));
+ CALL_SUBTEST_4(matrixRedux(ArrayXXf(rows, cols)));
+ CALL_SUBTEST_4(matrixRedux(MatrixXd(rows, cols)));
+ CALL_SUBTEST_4(matrixRedux(ArrayXXd(rows, cols)));
+ /* TODO: fix test for boolean */
+ /*CALL_SUBTEST_5(matrixRedux(MatrixX<bool>(rows, cols)));*/
+ /*CALL_SUBTEST_5(matrixRedux(ArrayXX<bool>(rows, cols)));*/
+ CALL_SUBTEST_5(matrixRedux(MatrixXi(rows, cols)));
+ CALL_SUBTEST_5(matrixRedux(ArrayXXi(rows, cols)));
+ CALL_SUBTEST_5(matrixRedux(MatrixX<int64_t>(rows, cols)));
+ CALL_SUBTEST_5(matrixRedux(ArrayXX<int64_t>(rows, cols)));
+ CALL_SUBTEST_6(matrixRedux(MatrixXcf(rows, cols)));
+ CALL_SUBTEST_6(matrixRedux(ArrayXXcf(rows, cols)));
+ CALL_SUBTEST_6(matrixRedux(MatrixXcd(rows, cols)));
+ CALL_SUBTEST_6(matrixRedux(ArrayXXcd(rows, cols)));
}
for (int i = 0; i < g_repeat; i++) {
- CALL_SUBTEST_7(vectorRedux(Vector4f()));
- CALL_SUBTEST_7(vectorRedux(Array4f()));
- CALL_SUBTEST_5(vectorRedux(VectorXd(internal::random<int>(1, maxsize))));
- CALL_SUBTEST_5(vectorRedux(ArrayXd(internal::random<int>(1, maxsize))));
- CALL_SUBTEST_8(vectorRedux(VectorXf(internal::random<int>(1, maxsize))));
- CALL_SUBTEST_8(vectorRedux(ArrayXf(internal::random<int>(1, maxsize))));
+ int size = internal::random<int>(1, maxsize);
+ EIGEN_UNUSED_VARIABLE(size);
+ CALL_SUBTEST_8(vectorRedux(Vector4f()));
+ CALL_SUBTEST_8(vectorRedux(Array4f()));
+ CALL_SUBTEST_9(vectorRedux(VectorXf(size)));
+ CALL_SUBTEST_9(vectorRedux(ArrayXf(size)));
+ CALL_SUBTEST_10(vectorRedux(VectorXd(size)));
+ CALL_SUBTEST_10(vectorRedux(ArrayXd(size)));
+ /* TODO: fix test for boolean */
+ /*CALL_SUBTEST_10(vectorRedux(VectorX<bool>(size)));*/
+ /*CALL_SUBTEST_10(vectorRedux(ArrayX<bool>(size)));*/
+ CALL_SUBTEST_10(vectorRedux(VectorXi(size)));
+ CALL_SUBTEST_10(vectorRedux(ArrayXi(size)));
+ CALL_SUBTEST_10(vectorRedux(VectorX<int64_t>(size)));
+ CALL_SUBTEST_10(vectorRedux(ArrayX<int64_t>(size)));
}
}