Update Eigen to: https://gitlab.com/libeigen/eigen/-/commit/e939c06b0e54fd7c4bfa173d01b47d2554bf7a85

PiperOrigin-RevId: 419652492
Change-Id: I01a8bd6bf8feea498fd1827cacc133eb4b971d75
diff --git a/Eigen/Cholesky b/Eigen/Cholesky
index a318ceb..2c686f1 100644
--- a/Eigen/Cholesky
+++ b/Eigen/Cholesky
@@ -32,11 +32,7 @@
 #include "src/Cholesky/LLT.h"
 #include "src/Cholesky/LDLT.h"
 #ifdef EIGEN_USE_LAPACKE
-#ifdef EIGEN_USE_MKL
-#include "mkl_lapacke.h"
-#else
-#include "src/misc/lapacke.h"
-#endif
+#include "src/misc/lapacke_helpers.h"
 #include "src/Cholesky/LLT_LAPACKE.h"
 #endif
 
diff --git a/Eigen/Core b/Eigen/Core
index 5cc8344..1074332 100644
--- a/Eigen/Core
+++ b/Eigen/Core
@@ -36,7 +36,7 @@
 
 // Disable the ipa-cp-clone optimization flag with MinGW 6.x or newer (enabled by default with -O3)
 // See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=556 for details.
-#if EIGEN_COMP_MINGW && EIGEN_GNUC_AT_LEAST(4,6) && EIGEN_GNUC_AT_MOST(5,5)
+#if EIGEN_COMP_MINGW && EIGEN_GNUC_AT_MOST(5,5)
   #pragma GCC optimize ("-fno-ipa-cp-clone")
 #endif
 
@@ -98,9 +98,7 @@
 #include <array>
 
 // for std::is_nothrow_move_assignable
-#ifdef EIGEN_INCLUDE_TYPE_TRAITS
 #include <type_traits>
-#endif
 
 // for outputting debug info
 #ifdef EIGEN_DEBUG_ASSIGN
diff --git a/Eigen/LU b/Eigen/LU
index 1236ceb..b7f9a8a 100644
--- a/Eigen/LU
+++ b/Eigen/LU
@@ -28,11 +28,7 @@
 #include "src/LU/FullPivLU.h"
 #include "src/LU/PartialPivLU.h"
 #ifdef EIGEN_USE_LAPACKE
-#ifdef EIGEN_USE_MKL
-#include "mkl_lapacke.h"
-#else
-#include "src/misc/lapacke.h"
-#endif
+#include "src/misc/lapacke_helpers.h"
 #include "src/LU/PartialPivLU_LAPACKE.h"
 #endif
 #include "src/LU/Determinant.h"
diff --git a/Eigen/QR b/Eigen/QR
index 8465b62..1f6c22e 100644
--- a/Eigen/QR
+++ b/Eigen/QR
@@ -36,11 +36,7 @@
 #include "src/QR/ColPivHouseholderQR.h"
 #include "src/QR/CompleteOrthogonalDecomposition.h"
 #ifdef EIGEN_USE_LAPACKE
-#ifdef EIGEN_USE_MKL
-#include "mkl_lapacke.h"
-#else
-#include "src/misc/lapacke.h"
-#endif
+#include "src/misc/lapacke_helpers.h"
 #include "src/QR/HouseholderQR_LAPACKE.h"
 #include "src/QR/ColPivHouseholderQR_LAPACKE.h"
 #endif
diff --git a/Eigen/SPQRSupport b/Eigen/SPQRSupport
index f70390c..33c3370 100644
--- a/Eigen/SPQRSupport
+++ b/Eigen/SPQRSupport
@@ -28,7 +28,7 @@
   *
   */
 
-#include "src/CholmodSupport/CholmodSupport.h"
+#include "Eigen/CholmodSupport"
 #include "src/SPQRSupport/SuiteSparseQRSupport.h"
 
 #endif
diff --git a/Eigen/src/Cholesky/LLT_LAPACKE.h b/Eigen/src/Cholesky/LLT_LAPACKE.h
index bde9bcd..62bc679 100644
--- a/Eigen/src/Cholesky/LLT_LAPACKE.h
+++ b/Eigen/src/Cholesky/LLT_LAPACKE.h
@@ -39,44 +39,12 @@
 
 namespace internal {
 
-namespace lapacke_llt_helpers {
-
-  // -------------------------------------------------------------------------------------------------------------------
-  //        Translation from Eigen to Lapacke types
-  // -------------------------------------------------------------------------------------------------------------------
-
-  // For complex numbers, the types in Eigen and Lapacke are different, but layout compatible.
-  template<typename Scalar> struct translate_type;
-  template<> struct translate_type<float> { using type = float; };
-  template<> struct translate_type<double> { using type = double; };
-  template<> struct translate_type<dcomplex> { using type = lapack_complex_double; };
-  template<> struct translate_type<scomplex> { using type = lapack_complex_float; };
-
-  // -------------------------------------------------------------------------------------------------------------------
-  //        Dispatch for potrf handling double, float, complex double, complex float types
-  // -------------------------------------------------------------------------------------------------------------------
-
-  inline lapack_int potrf(lapack_int matrix_order, char uplo, lapack_int size, double* a, lapack_int lda) {
-    return LAPACKE_dpotrf( matrix_order, uplo, size, a, lda );
-  }
-
-  inline lapack_int potrf(lapack_int matrix_order, char uplo, lapack_int size, float* a, lapack_int lda) {
-    return LAPACKE_spotrf( matrix_order, uplo, size, a, lda );
-  }
-
-  inline lapack_int potrf(lapack_int matrix_order, char uplo, lapack_int size, lapack_complex_double* a, lapack_int lda) {
-    return LAPACKE_zpotrf( matrix_order, uplo, size, a, lda );
-  }
-
-  inline lapack_int potrf(lapack_int matrix_order, char uplo, lapack_int size, lapack_complex_float* a, lapack_int lda) {
-    return LAPACKE_cpotrf( matrix_order, uplo, size, a, lda );
-  }
-
+namespace lapacke_helpers {
   // -------------------------------------------------------------------------------------------------------------------
   //        Dispatch for rank update handling upper and lower parts
   // -------------------------------------------------------------------------------------------------------------------
 
-  template<unsigned Mode>
+  template<UpLoType Mode>
   struct rank_update {};
 
   template<>
@@ -100,9 +68,8 @@
   //        Generic lapacke llt implementation that hands of to the dispatches
   // -------------------------------------------------------------------------------------------------------------------
 
-  template<typename Scalar, unsigned Mode>
+  template<typename Scalar, UpLoType Mode>
   struct lapacke_llt {
-    using BlasType = typename translate_type<Scalar>::type;
     template<typename MatrixType>
     static Index blocked(MatrixType& m)
     {
@@ -110,15 +77,13 @@
       if(m.rows() == 0) {
         return -1;
       }
-
       /* Set up parameters for ?potrf */
-      lapack_int size = convert_index<lapack_int>(m.rows());
-      lapack_int StorageOrder = MatrixType::Flags&RowMajorBit?RowMajor:ColMajor;
-      lapack_int matrix_order = StorageOrder==RowMajor ? LAPACK_ROW_MAJOR : LAPACK_COL_MAJOR;
+      lapack_int size = to_lapack(m.rows());
+      lapack_int matrix_order = lapack_storage_of(m);
       Scalar* a = &(m.coeffRef(0,0));
-      lapack_int lda = convert_index<lapack_int>(m.outerStride());
+      lapack_int lda = to_lapack(m.outerStride());
 
-      lapack_int info = potrf( matrix_order, Mode == Lower ? 'L' : 'U', size, (BlasType*)a, lda );
+      lapack_int info = potrf(matrix_order, translate_mode<Mode>, size, to_lapack(a), lda );
       info = (info==0) ? -1 : info>0 ? info-1 : size;
       return info;
     }
@@ -130,7 +95,7 @@
     }
   };
 }
-// end namespace lapacke_llt_helpers
+// end namespace lapacke_helpers
 
 /*
  * Here, we just put the generic implementation from lapacke_llt into a full specialization of the llt_inplace
@@ -139,13 +104,13 @@
  */
 
 #define EIGEN_LAPACKE_LLT(EIGTYPE) \
-template<> struct llt_inplace<EIGTYPE, Lower> : public lapacke_llt_helpers::lapacke_llt<EIGTYPE, Lower> {}; \
-template<> struct llt_inplace<EIGTYPE, Upper> : public lapacke_llt_helpers::lapacke_llt<EIGTYPE, Upper> {};
+template<> struct llt_inplace<EIGTYPE, Lower> : public lapacke_helpers::lapacke_llt<EIGTYPE, Lower> {}; \
+template<> struct llt_inplace<EIGTYPE, Upper> : public lapacke_helpers::lapacke_llt<EIGTYPE, Upper> {};
 
 EIGEN_LAPACKE_LLT(double)
 EIGEN_LAPACKE_LLT(float)
-EIGEN_LAPACKE_LLT(dcomplex)
-EIGEN_LAPACKE_LLT(scomplex)
+EIGEN_LAPACKE_LLT(std::complex<double>)
+EIGEN_LAPACKE_LLT(std::complex<float>)
 
 #undef EIGEN_LAPACKE_LLT
 
diff --git a/Eigen/src/Core/ArithmeticSequence.h b/Eigen/src/Core/ArithmeticSequence.h
index 1c8b670..112ca98 100644
--- a/Eigen/src/Core/ArithmeticSequence.h
+++ b/Eigen/src/Core/ArithmeticSequence.h
@@ -16,59 +16,6 @@
 
 namespace internal {
 
-#if !((!EIGEN_COMP_GNUC) || EIGEN_COMP_GNUC>=48)
-template<typename T> struct aseq_negate {};
-
-template<> struct aseq_negate<Index> {
-  typedef Index type;
-};
-
-template<int N> struct aseq_negate<FixedInt<N> > {
-  typedef FixedInt<-N> type;
-};
-
-// Compilation error in the following case:
-template<> struct aseq_negate<FixedInt<DynamicIndex> > {};
-
-template<typename FirstType,typename SizeType,typename IncrType,
-         bool FirstIsSymbolic=symbolic::is_symbolic<FirstType>::value,
-         bool SizeIsSymbolic =symbolic::is_symbolic<SizeType>::value>
-struct aseq_reverse_first_type {
-  typedef Index type;
-};
-
-template<typename FirstType,typename SizeType,typename IncrType>
-struct aseq_reverse_first_type<FirstType,SizeType,IncrType,true,true> {
-  typedef symbolic::AddExpr<FirstType,
-                            symbolic::ProductExpr<symbolic::AddExpr<SizeType,symbolic::ValueExpr<FixedInt<-1> > >,
-                                                  symbolic::ValueExpr<IncrType> >
-                           > type;
-};
-
-template<typename SizeType,typename IncrType,typename EnableIf = void>
-struct aseq_reverse_first_type_aux {
-  typedef Index type;
-};
-
-template<typename SizeType,typename IncrType>
-struct aseq_reverse_first_type_aux<SizeType,IncrType,typename internal::enable_if<bool((SizeType::value+IncrType::value)|0x1)>::type> {
-  typedef FixedInt<(SizeType::value-1)*IncrType::value> type;
-};
-
-template<typename FirstType,typename SizeType,typename IncrType>
-struct aseq_reverse_first_type<FirstType,SizeType,IncrType,true,false> {
-  typedef typename aseq_reverse_first_type_aux<SizeType,IncrType>::type Aux;
-  typedef symbolic::AddExpr<FirstType,symbolic::ValueExpr<Aux> > type;
-};
-
-template<typename FirstType,typename SizeType,typename IncrType>
-struct aseq_reverse_first_type<FirstType,SizeType,IncrType,false,true> {
-  typedef symbolic::AddExpr<symbolic::ProductExpr<symbolic::AddExpr<SizeType,symbolic::ValueExpr<FixedInt<-1> > >,
-                                                  symbolic::ValueExpr<IncrType> >,
-                            symbolic::ValueExpr<> > type;
-};
-#endif
-
 // Helper to cleanup the type of the increment:
 template<typename T> struct cleanup_seq_incr {
   typedef typename cleanup_index_type<T,DynamicIndex>::type type;
@@ -139,21 +86,9 @@
   IncrType  m_incr;
 
 public:
-
-#if (!EIGEN_COMP_GNUC) || EIGEN_COMP_GNUC>=48
   auto reverse() const -> decltype(Eigen::seqN(m_first+(m_size+fix<-1>())*m_incr,m_size,-m_incr)) {
     return seqN(m_first+(m_size+fix<-1>())*m_incr,m_size,-m_incr);
   }
-#else
-protected:
-  typedef typename internal::aseq_negate<IncrType>::type ReverseIncrType;
-  typedef typename internal::aseq_reverse_first_type<FirstType,SizeType,IncrType>::type ReverseFirstType;
-public:
-  ArithmeticSequence<ReverseFirstType,SizeType,ReverseIncrType>
-  reverse() const {
-    return seqN(m_first+(m_size+fix<-1>())*m_incr,m_size,-m_incr);
-  }
-#endif
 };
 
 /** \returns an ArithmeticSequence starting at \a first, of length \a size, and increment \a incr
diff --git a/Eigen/src/Core/AssignEvaluator.h b/Eigen/src/Core/AssignEvaluator.h
index 5056328..2c00387 100644
--- a/Eigen/src/Core/AssignEvaluator.h
+++ b/Eigen/src/Core/AssignEvaluator.h
@@ -42,7 +42,7 @@
     DstAlignment = DstEvaluator::Alignment,
     SrcAlignment = SrcEvaluator::Alignment,
     DstHasDirectAccess = (DstFlags & DirectAccessBit) == DirectAccessBit,
-    JointAlignment = EIGEN_PLAIN_ENUM_MIN(DstAlignment,SrcAlignment)
+    JointAlignment = plain_enum_min(DstAlignment, SrcAlignment)
   };
 
 private:
@@ -53,8 +53,8 @@
     InnerMaxSize = int(Dst::IsVectorAtCompileTime) ? int(Dst::MaxSizeAtCompileTime)
               : int(DstFlags)&RowMajorBit ? int(Dst::MaxColsAtCompileTime)
               : int(Dst::MaxRowsAtCompileTime),
-    RestrictedInnerSize = EIGEN_SIZE_MIN_PREFER_FIXED(InnerSize,MaxPacketSize),
-    RestrictedLinearSize = EIGEN_SIZE_MIN_PREFER_FIXED(Dst::SizeAtCompileTime,MaxPacketSize),
+    RestrictedInnerSize = min_size_prefer_fixed(InnerSize, MaxPacketSize),
+    RestrictedLinearSize = min_size_prefer_fixed(Dst::SizeAtCompileTime, MaxPacketSize),
     OuterStride = int(outer_stride_at_compile_time<Dst>::ret),
     MaxSizeAtCompileTime = Dst::SizeAtCompileTime
   };
diff --git a/Eigen/src/Core/BandMatrix.h b/Eigen/src/Core/BandMatrix.h
index 69b7681..a8d8b19 100644
--- a/Eigen/src/Core/BandMatrix.h
+++ b/Eigen/src/Core/BandMatrix.h
@@ -43,7 +43,7 @@
       DataRowsAtCompileTime = ((Supers!=Dynamic) && (Subs!=Dynamic))
                             ? 1 + Supers + Subs
                             : Dynamic,
-      SizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime,ColsAtCompileTime)
+      SizeAtCompileTime = min_size_prefer_dynamic(RowsAtCompileTime,ColsAtCompileTime)
     };
 
   public:
@@ -98,8 +98,8 @@
         DiagonalSize = (RowsAtCompileTime==Dynamic || ColsAtCompileTime==Dynamic)
                      ? Dynamic
                      : (ActualIndex<0
-                     ? EIGEN_SIZE_MIN_PREFER_DYNAMIC(ColsAtCompileTime, RowsAtCompileTime + ActualIndex)
-                     : EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime, ColsAtCompileTime - ActualIndex))
+                     ? min_size_prefer_dynamic(ColsAtCompileTime, RowsAtCompileTime + ActualIndex)
+                     : min_size_prefer_dynamic(RowsAtCompileTime, ColsAtCompileTime - ActualIndex))
       };
       typedef Block<CoefficientsType,1, DiagonalSize> BuildType;
       typedef typename internal::conditional<Conjugate,
diff --git a/Eigen/src/Core/CoreEvaluators.h b/Eigen/src/Core/CoreEvaluators.h
index 1dcd2f8..4a20312 100644
--- a/Eigen/src/Core/CoreEvaluators.h
+++ b/Eigen/src/Core/CoreEvaluators.h
@@ -657,9 +657,9 @@
         )
      ),
     Flags = (Flags0 & ~RowMajorBit) | (Arg1Flags & RowMajorBit),
-    Alignment = EIGEN_PLAIN_ENUM_MIN(
-        EIGEN_PLAIN_ENUM_MIN(evaluator<Arg1>::Alignment, evaluator<Arg2>::Alignment),
-        evaluator<Arg3>::Alignment)
+    Alignment = plain_enum_min(
+            plain_enum_min(evaluator<Arg1>::Alignment, evaluator<Arg2>::Alignment),
+            evaluator<Arg3>::Alignment)
   };
 
   EIGEN_DEVICE_FUNC explicit ternary_evaluator(const XprType& xpr) : m_d(xpr)
@@ -753,7 +753,7 @@
         )
      ),
     Flags = (Flags0 & ~RowMajorBit) | (LhsFlags & RowMajorBit),
-    Alignment = EIGEN_PLAIN_ENUM_MIN(evaluator<Lhs>::Alignment,evaluator<Rhs>::Alignment)
+    Alignment = plain_enum_min(evaluator<Lhs>::Alignment, evaluator<Rhs>::Alignment)
   };
 
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
@@ -902,7 +902,7 @@
       m_innerStride(map.innerStride()),
       m_outerStride(map.outerStride())
   {
-    EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(evaluator<Derived>::Flags&PacketAccessBit, internal::inner_stride_at_compile_time<Derived>::ret==1),
+    EIGEN_STATIC_ASSERT(check_implication(evaluator<Derived>::Flags&PacketAccessBit, internal::inner_stride_at_compile_time<Derived>::ret==1),
                         PACKET_ACCESS_REQUIRES_TO_HAVE_INNER_STRIDE_FIXED_TO_1);
     EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
   }
@@ -1074,7 +1074,7 @@
     Alignment0 = (InnerPanel && (OuterStrideAtCompileTime!=Dynamic)
                              && (OuterStrideAtCompileTime!=0)
                              && (((OuterStrideAtCompileTime * int(sizeof(Scalar))) % int(PacketAlignment)) == 0)) ? int(PacketAlignment) : 0,
-    Alignment = EIGEN_PLAIN_ENUM_MIN(evaluator<ArgType>::Alignment, Alignment0)
+    Alignment = plain_enum_min(evaluator<ArgType>::Alignment, Alignment0)
   };
   typedef block_evaluator<ArgType, BlockRows, BlockCols, InnerPanel> block_evaluator_type;
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
@@ -1225,7 +1225,7 @@
     : mapbase_evaluator<XprType, typename XprType::PlainObject>(block)
   {
     // TODO: for the 3.3 release, this should be turned to an internal assertion, but let's keep it as is for the beta lifetime
-    eigen_assert(((internal::UIntPtr(block.data()) % EIGEN_PLAIN_ENUM_MAX(1,evaluator<XprType>::Alignment)) == 0) && "data is not aligned");
+    eigen_assert(((internal::UIntPtr(block.data()) % plain_enum_max(1,evaluator<XprType>::Alignment)) == 0) && "data is not aligned");
   }
 };
 
@@ -1241,12 +1241,12 @@
   typedef Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> XprType;
   enum {
     CoeffReadCost = evaluator<ConditionMatrixType>::CoeffReadCost
-                  + EIGEN_PLAIN_ENUM_MAX(evaluator<ThenMatrixType>::CoeffReadCost,
-                                         evaluator<ElseMatrixType>::CoeffReadCost),
+                  + plain_enum_max(evaluator<ThenMatrixType>::CoeffReadCost,
+                                             evaluator<ElseMatrixType>::CoeffReadCost),
 
     Flags = (unsigned int)evaluator<ThenMatrixType>::Flags & evaluator<ElseMatrixType>::Flags & HereditaryBits,
 
-    Alignment = EIGEN_PLAIN_ENUM_MIN(evaluator<ThenMatrixType>::Alignment, evaluator<ElseMatrixType>::Alignment)
+    Alignment = plain_enum_min(evaluator<ThenMatrixType>::Alignment, evaluator<ElseMatrixType>::Alignment)
   };
 
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
diff --git a/Eigen/src/Core/DenseBase.h b/Eigen/src/Core/DenseBase.h
index 439322d..d62c851 100644
--- a/Eigen/src/Core/DenseBase.h
+++ b/Eigen/src/Core/DenseBase.h
@@ -674,8 +674,8 @@
        * Only do it when debugging Eigen, as this borders on paranoia and could slow compilation down
        */
 #ifdef EIGEN_INTERNAL_DEBUGGING
-      EIGEN_STATIC_ASSERT((EIGEN_IMPLIES(MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1, int(IsRowMajor))
-                        && EIGEN_IMPLIES(MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1, int(!IsRowMajor))),
+      EIGEN_STATIC_ASSERT((internal::check_implication(MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1, int(IsRowMajor))
+                        && internal::check_implication(MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1, int(!IsRowMajor))),
                           INVALID_STORAGE_ORDER_FOR_THIS_VECTOR_EXPRESSION)
 #endif
     }
diff --git a/Eigen/src/Core/DenseStorage.h b/Eigen/src/Core/DenseStorage.h
index 04afd72..371da3c 100644
--- a/Eigen/src/Core/DenseStorage.h
+++ b/Eigen/src/Core/DenseStorage.h
@@ -62,7 +62,7 @@
 
 #if defined(EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT)
   #define EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(sizemask)
-#elif EIGEN_GNUC_AT_LEAST(4,7)
+#elif EIGEN_COMP_GNUC
   // GCC 4.7 is too aggressive in its optimizations and remove the alignment test based on the fact the array is declared to be aligned.
   // See this bug report: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=53900
   // Hiding the origin of the array pointer behind a function argument seems to do the trick even if the function is inlined:
diff --git a/Eigen/src/Core/Diagonal.h b/Eigen/src/Core/Diagonal.h
index 7564c4c..6d8df3d 100644
--- a/Eigen/src/Core/Diagonal.h
+++ b/Eigen/src/Core/Diagonal.h
@@ -44,14 +44,14 @@
   typedef typename MatrixType::StorageKind StorageKind;
   enum {
     RowsAtCompileTime = (int(DiagIndex) == DynamicIndex || int(MatrixType::SizeAtCompileTime) == Dynamic) ? Dynamic
-                      : (EIGEN_PLAIN_ENUM_MIN(MatrixType::RowsAtCompileTime - EIGEN_PLAIN_ENUM_MAX(-DiagIndex, 0),
-                                              MatrixType::ColsAtCompileTime - EIGEN_PLAIN_ENUM_MAX( DiagIndex, 0))),
+                      : (plain_enum_min(MatrixType::RowsAtCompileTime - plain_enum_max(-DiagIndex, 0),
+                                        MatrixType::ColsAtCompileTime - plain_enum_max( DiagIndex, 0))),
     ColsAtCompileTime = 1,
     MaxRowsAtCompileTime = int(MatrixType::MaxSizeAtCompileTime) == Dynamic ? Dynamic
-                         : DiagIndex == DynamicIndex ? EIGEN_SIZE_MIN_PREFER_FIXED(MatrixType::MaxRowsAtCompileTime,
-                                                                              MatrixType::MaxColsAtCompileTime)
-                         : (EIGEN_PLAIN_ENUM_MIN(MatrixType::MaxRowsAtCompileTime - EIGEN_PLAIN_ENUM_MAX(-DiagIndex, 0),
-                                                 MatrixType::MaxColsAtCompileTime - EIGEN_PLAIN_ENUM_MAX( DiagIndex, 0))),
+                         : DiagIndex == DynamicIndex ? min_size_prefer_fixed(MatrixType::MaxRowsAtCompileTime,
+                                                                             MatrixType::MaxColsAtCompileTime)
+                         : (plain_enum_min(MatrixType::MaxRowsAtCompileTime - plain_enum_max(-DiagIndex, 0),
+                                           MatrixType::MaxColsAtCompileTime - plain_enum_max( DiagIndex, 0))),
     MaxColsAtCompileTime = 1,
     MaskLvalueBit = is_lvalue<MatrixType>::value ? LvalueBit : 0,
     Flags = (unsigned int)_MatrixTypeNested::Flags & (RowMajorBit | MaskLvalueBit | DirectAccessBit) & ~RowMajorBit, // FIXME DirectAccessBit should not be handled by expressions
diff --git a/Eigen/src/Core/GeneralProduct.h b/Eigen/src/Core/GeneralProduct.h
index 251b5d4..33b667d 100644
--- a/Eigen/src/Core/GeneralProduct.h
+++ b/Eigen/src/Core/GeneralProduct.h
@@ -59,10 +59,10 @@
     Rows    = traits<_Lhs>::RowsAtCompileTime,
     MaxCols = traits<_Rhs>::MaxColsAtCompileTime,
     Cols    = traits<_Rhs>::ColsAtCompileTime,
-    MaxDepth = EIGEN_SIZE_MIN_PREFER_FIXED(traits<_Lhs>::MaxColsAtCompileTime,
-                                           traits<_Rhs>::MaxRowsAtCompileTime),
-    Depth = EIGEN_SIZE_MIN_PREFER_FIXED(traits<_Lhs>::ColsAtCompileTime,
-                                        traits<_Rhs>::RowsAtCompileTime)
+    MaxDepth = min_size_prefer_fixed(traits<_Lhs>::MaxColsAtCompileTime,
+                                     traits<_Rhs>::MaxRowsAtCompileTime),
+    Depth = min_size_prefer_fixed(traits<_Lhs>::ColsAtCompileTime,
+                                  traits<_Rhs>::RowsAtCompileTime)
   };
 
   // the splitting into different lines of code here, introducing the _select enums and the typedef below,
@@ -182,12 +182,13 @@
     PacketSize      = internal::packet_traits<Scalar>::size
   };
   #if EIGEN_MAX_STATIC_ALIGN_BYTES!=0
-  internal::plain_array<Scalar,EIGEN_SIZE_MIN_PREFER_FIXED(Size,MaxSize),0,EIGEN_PLAIN_ENUM_MIN(AlignedMax,PacketSize)> m_data;
+  internal::plain_array<Scalar, internal::min_size_prefer_fixed(Size, MaxSize), 0,
+                        internal::plain_enum_min(AlignedMax, PacketSize)> m_data;
   EIGEN_STRONG_INLINE Scalar* data() { return m_data.array; }
   #else
   // Some architectures cannot align on the stack,
   // => let's manually enforce alignment by allocating more data and return the address of the first aligned element.
-  internal::plain_array<Scalar,EIGEN_SIZE_MIN_PREFER_FIXED(Size,MaxSize)+(ForceAlignment?EIGEN_MAX_ALIGN_BYTES:0),0> m_data;
+  internal::plain_array<Scalar, internal::min_size_prefer_fixed(Size, MaxSize)+(ForceAlignment?EIGEN_MAX_ALIGN_BYTES:0),0> m_data;
   EIGEN_STRONG_INLINE Scalar* data() {
     return ForceAlignment
             ? reinterpret_cast<Scalar*>((internal::UIntPtr(m_data.array) & ~(std::size_t(EIGEN_MAX_ALIGN_BYTES-1))) + EIGEN_MAX_ALIGN_BYTES)
@@ -225,7 +226,7 @@
     typedef internal::blas_traits<Rhs> RhsBlasTraits;
     typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType;
   
-    typedef Map<Matrix<ResScalar,Dynamic,1>, EIGEN_PLAIN_ENUM_MIN(AlignedMax,internal::packet_traits<ResScalar>::size)> MappedDest;
+    typedef Map<Matrix<ResScalar,Dynamic,1>, plain_enum_min(AlignedMax, internal::packet_traits<ResScalar>::size)> MappedDest;
 
     ActualLhsType actualLhs = LhsBlasTraits::extract(lhs);
     ActualRhsType actualRhs = RhsBlasTraits::extract(rhs);
diff --git a/Eigen/src/Core/GlobalFunctions.h b/Eigen/src/Core/GlobalFunctions.h
index c6d36ea..53f9dfa 100644
--- a/Eigen/src/Core/GlobalFunctions.h
+++ b/Eigen/src/Core/GlobalFunctions.h
@@ -116,10 +116,9 @@
 #else
   template <typename Derived,typename ScalarExponent>
   EIGEN_DEVICE_FUNC inline
-  EIGEN_MSVC10_WORKAROUND_BINARYOP_RETURN_TYPE(
     const EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(Derived,typename internal::promote_scalar_arg<typename Derived::Scalar
                                                  EIGEN_COMMA ScalarExponent EIGEN_COMMA
-                                                 EIGEN_SCALAR_BINARY_SUPPORTED(pow,typename Derived::Scalar,ScalarExponent)>::type,pow))
+                                                 EIGEN_SCALAR_BINARY_SUPPORTED(pow,typename Derived::Scalar,ScalarExponent)>::type,pow)
   pow(const Eigen::ArrayBase<Derived>& x, const ScalarExponent& exponent)
   {
     typedef typename internal::promote_scalar_arg<typename Derived::Scalar,ScalarExponent,
@@ -170,10 +169,9 @@
 #else
   template <typename Scalar, typename Derived>
   EIGEN_DEVICE_FUNC inline
-  EIGEN_MSVC10_WORKAROUND_BINARYOP_RETURN_TYPE(
     const EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(typename internal::promote_scalar_arg<typename Derived::Scalar
                                                  EIGEN_COMMA Scalar EIGEN_COMMA
-                                                 EIGEN_SCALAR_BINARY_SUPPORTED(pow,Scalar,typename Derived::Scalar)>::type,Derived,pow))
+                                                 EIGEN_SCALAR_BINARY_SUPPORTED(pow,Scalar,typename Derived::Scalar)>::type,Derived,pow)
   pow(const Scalar& x, const Eigen::ArrayBase<Derived>& exponents) {
     typedef typename internal::promote_scalar_arg<typename Derived::Scalar,Scalar,
                                                   EIGEN_SCALAR_BINARY_SUPPORTED(pow,Scalar,typename Derived::Scalar)>::type PromotedScalar;
diff --git a/Eigen/src/Core/MathFunctions.h b/Eigen/src/Core/MathFunctions.h
index 957cca7..55e3159 100644
--- a/Eigen/src/Core/MathFunctions.h
+++ b/Eigen/src/Core/MathFunctions.h
@@ -21,15 +21,6 @@
 
 namespace Eigen {
 
-// On WINCE, std::abs is defined for int only, so let's defined our own overloads:
-// This issue has been confirmed with MSVC 2008 only, but the issue might exist for more recent versions too.
-#if EIGEN_OS_WINCE && EIGEN_COMP_MSVC && EIGEN_COMP_MSVC<=1500
-long        abs(long        x) { return (labs(x));  }
-double      abs(double      x) { return (fabs(x));  }
-float       abs(float       x) { return (fabsf(x)); }
-long double abs(long double x) { return (fabsl(x)); }
-#endif
-
 namespace internal {
 
 /** \internal \class global_math_functions_filtering_base
@@ -925,8 +916,8 @@
 #else
     enum { rand_bits = meta_floor_log2<(unsigned int)(RAND_MAX)+1>::value,
            scalar_bits = sizeof(Scalar) * CHAR_BIT,
-           shift = EIGEN_PLAIN_ENUM_MAX(0, int(rand_bits) - int(scalar_bits)),
-           offset = NumTraits<Scalar>::IsSigned ? (1 << (EIGEN_PLAIN_ENUM_MIN(rand_bits,scalar_bits)-1)) : 0
+           shift = plain_enum_max(0, int(rand_bits) - int(scalar_bits)),
+           offset = NumTraits<Scalar>::IsSigned ? (1 << (plain_enum_min(rand_bits, scalar_bits)-1)) : 0
     };
     return Scalar((std::rand() >> shift) - offset);
 #endif
@@ -963,7 +954,7 @@
 // Implementation of is* functions
 
 // std::is* do not work with fast-math and gcc, std::is* are available on MSVC 2013 and newer, as well as in clang.
-#if (EIGEN_HAS_CXX11_MATH && !(EIGEN_COMP_GNUC_STRICT && __FINITE_MATH_ONLY__)) || (EIGEN_COMP_MSVC>=1800) || (EIGEN_COMP_CLANG)
+#if (EIGEN_HAS_CXX11_MATH && !(EIGEN_COMP_GNUC_STRICT && __FINITE_MATH_ONLY__)) || (EIGEN_COMP_MSVC) || (EIGEN_COMP_CLANG)
 #define EIGEN_USE_STD_FPCLASSIFY 1
 #else
 #define EIGEN_USE_STD_FPCLASSIFY 0
@@ -1049,7 +1040,7 @@
 
 #elif (defined __FINITE_MATH_ONLY__ && __FINITE_MATH_ONLY__ && EIGEN_COMP_GNUC)
 
-#if EIGEN_GNUC_AT_LEAST(5,0)
+#if EIGEN_COMP_GNUC
   #define EIGEN_TMP_NOOPT_ATTRIB EIGEN_DEVICE_FUNC inline __attribute__((optimize("no-finite-math-only")))
 #else
   // NOTE the inline qualifier and noinline attribute are both needed: the former is to avoid linking issue (duplicate symbol),
diff --git a/Eigen/src/Core/MatrixBase.h b/Eigen/src/Core/MatrixBase.h
index 3552d5a..70d0cf7 100644
--- a/Eigen/src/Core/MatrixBase.h
+++ b/Eigen/src/Core/MatrixBase.h
@@ -94,8 +94,8 @@
 
 #ifndef EIGEN_PARSED_BY_DOXYGEN
     /** type of the equivalent square matrix */
-    typedef Matrix<Scalar,EIGEN_SIZE_MAX(RowsAtCompileTime,ColsAtCompileTime),
-                          EIGEN_SIZE_MAX(RowsAtCompileTime,ColsAtCompileTime)> SquareMatrixType;
+    typedef Matrix<Scalar, internal::max_size_prefer_dynamic(RowsAtCompileTime, ColsAtCompileTime),
+                           internal::max_size_prefer_dynamic(RowsAtCompileTime, ColsAtCompileTime)> SquareMatrixType;
 #endif // not EIGEN_PARSED_BY_DOXYGEN
 
     /** \returns the size of the main diagonal, which is min(rows(),cols()).
diff --git a/Eigen/src/Core/NumTraits.h b/Eigen/src/Core/NumTraits.h
index 63ba416..e484bb6 100644
--- a/Eigen/src/Core/NumTraits.h
+++ b/Eigen/src/Core/NumTraits.h
@@ -85,12 +85,10 @@
 // TODO: Replace by std::bit_cast (available in C++20)
 template <typename Tgt, typename Src>
 EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Tgt bit_cast(const Src& src) {
-#if EIGEN_HAS_TYPE_TRAITS
   // The behaviour of memcpy is not specified for non-trivially copyable types
   EIGEN_STATIC_ASSERT(std::is_trivially_copyable<Src>::value, THIS_TYPE_IS_NOT_SUPPORTED);
   EIGEN_STATIC_ASSERT(std::is_trivially_copyable<Tgt>::value && std::is_default_constructible<Tgt>::value,
                       THIS_TYPE_IS_NOT_SUPPORTED);
-#endif
   EIGEN_STATIC_ASSERT(sizeof(Src) == sizeof(Tgt), THIS_TYPE_IS_NOT_SUPPORTED);
 
   Tgt tgt;
diff --git a/Eigen/src/Core/PlainObjectBase.h b/Eigen/src/Core/PlainObjectBase.h
index 89960b9..4367ea5 100644
--- a/Eigen/src/Core/PlainObjectBase.h
+++ b/Eigen/src/Core/PlainObjectBase.h
@@ -136,8 +136,8 @@
     enum { NeedsToAlign = (SizeAtCompileTime != Dynamic) && (internal::traits<Derived>::Alignment>0) };
     EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign)
 
-    EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1, (int(Options)&RowMajor)==RowMajor), INVALID_MATRIX_TEMPLATE_PARAMETERS)
-    EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1, (int(Options)&RowMajor)==0), INVALID_MATRIX_TEMPLATE_PARAMETERS)
+    EIGEN_STATIC_ASSERT(internal::check_implication(MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1, (int(Options)&RowMajor)==RowMajor), INVALID_MATRIX_TEMPLATE_PARAMETERS)
+    EIGEN_STATIC_ASSERT(internal::check_implication(MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1, (int(Options)&RowMajor)==0), INVALID_MATRIX_TEMPLATE_PARAMETERS)
     EIGEN_STATIC_ASSERT((RowsAtCompileTime == Dynamic) || (RowsAtCompileTime >= 0), INVALID_MATRIX_TEMPLATE_PARAMETERS)
     EIGEN_STATIC_ASSERT((ColsAtCompileTime == Dynamic) || (ColsAtCompileTime >= 0), INVALID_MATRIX_TEMPLATE_PARAMETERS)
     EIGEN_STATIC_ASSERT((MaxRowsAtCompileTime == Dynamic) || (MaxRowsAtCompileTime >= 0), INVALID_MATRIX_TEMPLATE_PARAMETERS)
@@ -282,10 +282,10 @@
     EIGEN_DEVICE_FUNC
     EIGEN_STRONG_INLINE void resize(Index rows, Index cols)
     {
-      eigen_assert(EIGEN_IMPLIES(RowsAtCompileTime!=Dynamic,rows==RowsAtCompileTime)
-                   && EIGEN_IMPLIES(ColsAtCompileTime!=Dynamic,cols==ColsAtCompileTime)
-                   && EIGEN_IMPLIES(RowsAtCompileTime==Dynamic && MaxRowsAtCompileTime!=Dynamic,rows<=MaxRowsAtCompileTime)
-                   && EIGEN_IMPLIES(ColsAtCompileTime==Dynamic && MaxColsAtCompileTime!=Dynamic,cols<=MaxColsAtCompileTime)
+      eigen_assert(internal::check_implication(RowsAtCompileTime!=Dynamic, rows==RowsAtCompileTime)
+                   && internal::check_implication(ColsAtCompileTime!=Dynamic, cols==ColsAtCompileTime)
+                   && internal::check_implication(RowsAtCompileTime==Dynamic && MaxRowsAtCompileTime!=Dynamic, rows<=MaxRowsAtCompileTime)
+                   && internal::check_implication(ColsAtCompileTime==Dynamic && MaxColsAtCompileTime!=Dynamic, cols<=MaxColsAtCompileTime)
                    && rows>=0 && cols>=0 && "Invalid sizes when resizing a matrix or array.");
       internal::check_rows_cols_for_overflow<MaxSizeAtCompileTime>::run(rows, cols);
       #ifdef EIGEN_INITIALIZE_COEFFS
@@ -983,11 +983,7 @@
 template <typename Derived, typename OtherDerived, bool IsVector>
 struct conservative_resize_like_impl
 {
-  #if EIGEN_HAS_TYPE_TRAITS
   static const bool IsRelocatable = std::is_trivially_copyable<typename Derived::Scalar>::value;
-  #else
-  static const bool IsRelocatable = !NumTraits<typename Derived::Scalar>::RequireInitialization;
-  #endif
   static void run(DenseBase<Derived>& _this, Index rows, Index cols)
   {
     if (_this.rows() == rows && _this.cols() == cols) return;
diff --git a/Eigen/src/Core/Product.h b/Eigen/src/Core/Product.h
index 545fdb9..3b788b3 100644
--- a/Eigen/src/Core/Product.h
+++ b/Eigen/src/Core/Product.h
@@ -42,7 +42,7 @@
     MaxColsAtCompileTime = RhsTraits::MaxColsAtCompileTime,
 
     // FIXME: only needed by GeneralMatrixMatrixTriangular
-    InnerSize = EIGEN_SIZE_MIN_PREFER_FIXED(LhsTraits::ColsAtCompileTime, RhsTraits::RowsAtCompileTime),
+    InnerSize = min_size_prefer_fixed(LhsTraits::ColsAtCompileTime, RhsTraits::RowsAtCompileTime),
 
     // The storage order is somewhat arbitrary here. The correct one will be determined through the evaluator.
     Flags = (MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1) ? RowMajorBit
diff --git a/Eigen/src/Core/ProductEvaluators.h b/Eigen/src/Core/ProductEvaluators.h
index 42e92c2..aff3572 100644
--- a/Eigen/src/Core/ProductEvaluators.h
+++ b/Eigen/src/Core/ProductEvaluators.h
@@ -537,7 +537,7 @@
   enum {
     RowsAtCompileTime = LhsNestedCleaned::RowsAtCompileTime,
     ColsAtCompileTime = RhsNestedCleaned::ColsAtCompileTime,
-    InnerSize = EIGEN_SIZE_MIN_PREFER_FIXED(LhsNestedCleaned::ColsAtCompileTime, RhsNestedCleaned::RowsAtCompileTime),
+    InnerSize = min_size_prefer_fixed(LhsNestedCleaned::ColsAtCompileTime, RhsNestedCleaned::RowsAtCompileTime),
     MaxRowsAtCompileTime = LhsNestedCleaned::MaxRowsAtCompileTime,
     MaxColsAtCompileTime = RhsNestedCleaned::MaxColsAtCompileTime
   };
@@ -566,8 +566,8 @@
     RhsVecPacketSize = unpacket_traits<RhsVecPacketType>::size,
 
     // Here, we don't care about alignment larger than the usable packet size.
-    LhsAlignment = EIGEN_PLAIN_ENUM_MIN(LhsEtorType::Alignment,LhsVecPacketSize*int(sizeof(typename LhsNestedCleaned::Scalar))),
-    RhsAlignment = EIGEN_PLAIN_ENUM_MIN(RhsEtorType::Alignment,RhsVecPacketSize*int(sizeof(typename RhsNestedCleaned::Scalar))),
+    LhsAlignment = plain_enum_min(LhsEtorType::Alignment, LhsVecPacketSize*int(sizeof(typename LhsNestedCleaned::Scalar))),
+    RhsAlignment = plain_enum_min(RhsEtorType::Alignment, RhsVecPacketSize*int(sizeof(typename RhsNestedCleaned::Scalar))),
 
     SameType = is_same<typename LhsNestedCleaned::Scalar,typename RhsNestedCleaned::Scalar>::value,
 
@@ -587,8 +587,8 @@
     LhsOuterStrideBytes = int(LhsNestedCleaned::OuterStrideAtCompileTime) * int(sizeof(typename LhsNestedCleaned::Scalar)),
     RhsOuterStrideBytes = int(RhsNestedCleaned::OuterStrideAtCompileTime) * int(sizeof(typename RhsNestedCleaned::Scalar)),
 
-    Alignment = bool(CanVectorizeLhs) ? (LhsOuterStrideBytes<=0 || (int(LhsOuterStrideBytes) % EIGEN_PLAIN_ENUM_MAX(1,LhsAlignment))!=0 ? 0 : LhsAlignment)
-              : bool(CanVectorizeRhs) ? (RhsOuterStrideBytes<=0 || (int(RhsOuterStrideBytes) % EIGEN_PLAIN_ENUM_MAX(1,RhsAlignment))!=0 ? 0 : RhsAlignment)
+    Alignment = bool(CanVectorizeLhs) ? (LhsOuterStrideBytes<=0 || (int(LhsOuterStrideBytes) % plain_enum_max(1, LhsAlignment))!=0 ? 0 : LhsAlignment)
+              : bool(CanVectorizeRhs) ? (RhsOuterStrideBytes<=0 || (int(RhsOuterStrideBytes) % plain_enum_max(1, RhsAlignment))!=0 ? 0 : RhsAlignment)
               : 0,
 
     /* CanVectorizeInner deserves special explanation. It does not affect the product flags. It is not used outside
@@ -889,7 +889,7 @@
   {
     enum {
       InnerSize = (MatrixType::Flags & RowMajorBit) ? MatrixType::ColsAtCompileTime : MatrixType::RowsAtCompileTime,
-      DiagonalPacketLoadMode = EIGEN_PLAIN_ENUM_MIN(LoadMode,((InnerSize%16) == 0) ? int(Aligned16) : int(evaluator<DiagonalType>::Alignment)) // FIXME hardcoded 16!!
+      DiagonalPacketLoadMode = plain_enum_min(LoadMode,((InnerSize%16) == 0) ? int(Aligned16) : int(evaluator<DiagonalType>::Alignment)) // FIXME hardcoded 16!!
     };
     return internal::pmul(m_matImpl.template packet<LoadMode,PacketType>(row, col),
                           m_diagImpl.template packet<DiagonalPacketLoadMode,PacketType>(id));
diff --git a/Eigen/src/Core/Redux.h b/Eigen/src/Core/Redux.h
index e05a0cc..d3efad9 100644
--- a/Eigen/src/Core/Redux.h
+++ b/Eigen/src/Core/Redux.h
@@ -240,7 +240,7 @@
     const int packetAlignment = unpacket_traits<PacketScalar>::alignment;
     enum {
       alignment0 = (bool(Evaluator::Flags & DirectAccessBit) && bool(packet_traits<Scalar>::AlignedOnScalar)) ? int(packetAlignment) : int(Unaligned),
-      alignment = EIGEN_PLAIN_ENUM_MAX(alignment0, Evaluator::Alignment)
+      alignment = plain_enum_max(alignment0, Evaluator::Alignment)
     };
     const Index alignedStart = internal::first_default_aligned(xpr);
     const Index alignedSize2 = ((size-alignedStart)/(2*packetSize))*(2*packetSize);
diff --git a/Eigen/src/Core/Reshaped.h b/Eigen/src/Core/Reshaped.h
index 8a9cedb..9448445 100644
--- a/Eigen/src/Core/Reshaped.h
+++ b/Eigen/src/Core/Reshaped.h
@@ -445,7 +445,7 @@
     : mapbase_evaluator<XprType, typename XprType::PlainObject>(xpr)
   {
     // TODO: for the 3.4 release, this should be turned to an internal assertion, but let's keep it as is for the beta lifetime
-    eigen_assert(((internal::UIntPtr(xpr.data()) % EIGEN_PLAIN_ENUM_MAX(1,evaluator<XprType>::Alignment)) == 0) && "data is not aligned");
+    eigen_assert(((internal::UIntPtr(xpr.data()) % plain_enum_max(1, evaluator<XprType>::Alignment)) == 0) && "data is not aligned");
   }
 };
 
diff --git a/Eigen/src/Core/VectorwiseOp.h b/Eigen/src/Core/VectorwiseOp.h
index 3c3cc45..5d4c11f 100644
--- a/Eigen/src/Core/VectorwiseOp.h
+++ b/Eigen/src/Core/VectorwiseOp.h
@@ -232,9 +232,9 @@
     typename ExtendedType<OtherDerived>::Type
     extendedTo(const DenseBase<OtherDerived>& other) const
     {
-      EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(isVertical, OtherDerived::MaxColsAtCompileTime==1),
+      EIGEN_STATIC_ASSERT(internal::check_implication(isVertical, OtherDerived::MaxColsAtCompileTime==1),
                           YOU_PASSED_A_ROW_VECTOR_BUT_A_COLUMN_VECTOR_WAS_EXPECTED)
-      EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(isHorizontal, OtherDerived::MaxRowsAtCompileTime==1),
+      EIGEN_STATIC_ASSERT(internal::check_implication(isHorizontal, OtherDerived::MaxRowsAtCompileTime==1),
                           YOU_PASSED_A_COLUMN_VECTOR_BUT_A_ROW_VECTOR_WAS_EXPECTED)
       return typename ExtendedType<OtherDerived>::Type
                       (other.derived(),
@@ -255,9 +255,9 @@
     typename OppositeExtendedType<OtherDerived>::Type
     extendedToOpposite(const DenseBase<OtherDerived>& other) const
     {
-      EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(isHorizontal, OtherDerived::MaxColsAtCompileTime==1),
+      EIGEN_STATIC_ASSERT(internal::check_implication(isHorizontal, OtherDerived::MaxColsAtCompileTime==1),
                           YOU_PASSED_A_ROW_VECTOR_BUT_A_COLUMN_VECTOR_WAS_EXPECTED)
-      EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(isVertical, OtherDerived::MaxRowsAtCompileTime==1),
+      EIGEN_STATIC_ASSERT(internal::check_implication(isVertical, OtherDerived::MaxRowsAtCompileTime==1),
                           YOU_PASSED_A_COLUMN_VECTOR_BUT_A_ROW_VECTOR_WAS_EXPECTED)
       return typename OppositeExtendedType<OtherDerived>::Type
                       (other.derived(),
diff --git a/Eigen/src/Core/arch/Default/GenericPacketMathFunctions.h b/Eigen/src/Core/arch/Default/GenericPacketMathFunctions.h
index 071acf0..5f2a130 100644
--- a/Eigen/src/Core/arch/Default/GenericPacketMathFunctions.h
+++ b/Eigen/src/Core/arch/Default/GenericPacketMathFunctions.h
@@ -440,6 +440,7 @@
 EIGEN_UNUSED
 Packet pexp_float(const Packet _x)
 {
+  const Packet cst_zero   = pset1<Packet>(0.0f);
   const Packet cst_1      = pset1<Packet>(1.0f);
   const Packet cst_half   = pset1<Packet>(0.5f);
   const Packet cst_exp_hi = pset1<Packet>( 88.723f);
@@ -454,7 +455,8 @@
   const Packet cst_cephes_exp_p5 = pset1<Packet>(5.0000001201E-1f);
 
   // Clamp x.
-  Packet x = pmax(pmin(_x, cst_exp_hi), cst_exp_lo);
+  Packet zero_mask = pcmp_lt(_x, cst_exp_lo);
+  Packet x = pmin(_x, cst_exp_hi);
 
   // Express exp(x) as exp(m*ln(2) + r), start by extracting
   // m = floor(x/ln(2) + 0.5).
@@ -483,7 +485,7 @@
 
   // Return 2^m * exp(r).
   // TODO: replace pldexp with faster implementation since y in [-1, 1).
-  return pmax(pldexp(y,m), _x);
+  return pselect(zero_mask, cst_zero, pmax(pldexp(y,m), _x));
 }
 
 template <typename Packet>
@@ -492,7 +494,7 @@
 Packet pexp_double(const Packet _x)
 {
   Packet x = _x;
-
+  const Packet cst_zero = pset1<Packet>(0.0f);
   const Packet cst_1 = pset1<Packet>(1.0);
   const Packet cst_2 = pset1<Packet>(2.0);
   const Packet cst_half = pset1<Packet>(0.5);
@@ -514,7 +516,8 @@
   Packet tmp, fx;
 
   // clamp x
-  x = pmax(pmin(x, cst_exp_hi), cst_exp_lo);
+  Packet zero_mask = pcmp_lt(_x, cst_exp_lo);
+  x = pmin(x, cst_exp_hi);
   // Express exp(x) as exp(g + n*log(2)).
   fx = pmadd(cst_cephes_LOG2EF, x, cst_half);
 
@@ -552,7 +555,7 @@
   // Construct the result 2^n * exp(g) = e * x. The max is used to catch
   // non-finite values in the input.
   // TODO: replace pldexp with faster implementation since x in [-1, 1).
-  return pmax(pldexp(x,fx), _x);
+  return pselect(zero_mask, cst_zero, pmax(pldexp(x,fx), _x));
 }
 
 // The following code is inspired by the following stack-overflow answer:
@@ -621,7 +624,7 @@
 template<bool ComputeSine,typename Packet>
 EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
 EIGEN_UNUSED
-#if EIGEN_GNUC_AT_LEAST(4,4) && EIGEN_COMP_GNUC_STRICT
+#if EIGEN_COMP_GNUC_STRICT
 __attribute__((optimize("-fno-unsafe-math-optimizations")))
 #endif
 Packet psincos_float(const Packet& _x)
diff --git a/Eigen/src/Core/arch/NEON/PacketMath.h b/Eigen/src/Core/arch/NEON/PacketMath.h
index e908bf5..707f7d7 100644
--- a/Eigen/src/Core/arch/NEON/PacketMath.h
+++ b/Eigen/src/Core/arch/NEON/PacketMath.h
@@ -450,15 +450,6 @@
   };
 };
 
-#if EIGEN_GNUC_AT_MOST(4, 4) && !EIGEN_COMP_LLVM
-// workaround gcc 4.2, 4.3 and 4.4 compilation issue
-EIGEN_STRONG_INLINE float32x4_t vld1q_f32(const float* x) { return ::vld1q_f32((const float32_t*)x); }
-EIGEN_STRONG_INLINE float32x2_t vld1_f32(const float* x) { return ::vld1_f32 ((const float32_t*)x); }
-EIGEN_STRONG_INLINE float32x2_t vld1_dup_f32(const float* x) { return ::vld1_dup_f32 ((const float32_t*)x); }
-EIGEN_STRONG_INLINE void vst1q_f32(float* to, float32x4_t from) { ::vst1q_f32((float32_t*)to,from); }
-EIGEN_STRONG_INLINE void vst1_f32 (float* to, float32x2_t from) { ::vst1_f32 ((float32_t*)to,from); }
-#endif
-
 template<> struct unpacket_traits<Packet2f>
 {
   typedef float type;
diff --git a/Eigen/src/Core/arch/SSE/Complex.h b/Eigen/src/Core/arch/SSE/Complex.h
index ccee04b..61e9406 100644
--- a/Eigen/src/Core/arch/SSE/Complex.h
+++ b/Eigen/src/Core/arch/SSE/Complex.h
@@ -137,17 +137,9 @@
 
 template<> EIGEN_STRONG_INLINE std::complex<float>  pfirst<Packet2cf>(const Packet2cf& a)
 {
-  #if EIGEN_GNUC_AT_MOST(4,3)
-  // Workaround gcc 4.2 ICE - this is not performance wise ideal, but who cares...
-  // This workaround also fix invalid code generation with gcc 4.3
-  EIGEN_ALIGN16 std::complex<float> res[2];
-  _mm_store_ps((float*)res, a.v);
-  return res[0];
-  #else
   std::complex<float> res;
   _mm_storel_pi((__m64*)&res, a.v);
   return res;
-  #endif
 }
 
 template<> EIGEN_STRONG_INLINE Packet2cf preverse(const Packet2cf& a) { return Packet2cf(_mm_castpd_ps(preverse(Packet2d(_mm_castps_pd(a.v))))); }
diff --git a/Eigen/src/Core/arch/SSE/PacketMath.h b/Eigen/src/Core/arch/SSE/PacketMath.h
index f2d2667..45e219c 100755
--- a/Eigen/src/Core/arch/SSE/PacketMath.h
+++ b/Eigen/src/Core/arch/SSE/PacketMath.h
@@ -247,18 +247,9 @@
 template<> struct scalar_div_cost<double,true> { enum { value = 8 }; };
 #endif
 
-#if EIGEN_COMP_MSVC==1500
-// Workaround MSVC 9 internal compiler error.
-// TODO: It has been detected with win64 builds (amd64), so let's check whether it also happens in 32bits+SSE mode
-// TODO: let's check whether there does not exist a better fix, like adding a pset0() function. (it crashed on pset1(0)).
-template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float&  from) { return _mm_set_ps(from,from,from,from); }
-template<> EIGEN_STRONG_INLINE Packet2d pset1<Packet2d>(const double& from) { return _mm_set_pd(from,from); }
-template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int&    from) { return _mm_set_epi32(from,from,from,from); }
-#else
 template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float&  from) { return _mm_set_ps1(from); }
 template<> EIGEN_STRONG_INLINE Packet2d pset1<Packet2d>(const double& from) { return _mm_set1_pd(from); }
 template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int&    from) { return _mm_set1_epi32(from); }
-#endif
 template<> EIGEN_STRONG_INLINE Packet16b pset1<Packet16b>(const bool&    from) { return _mm_set1_epi8(static_cast<char>(from)); }
 
 template<> EIGEN_STRONG_INLINE Packet4f pset1frombits<Packet4f>(unsigned int from) { return _mm_castsi128_ps(pset1<Packet4i>(from)); }
@@ -721,15 +712,7 @@
 #if EIGEN_COMP_MSVC
   template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float*  from) {
     EIGEN_DEBUG_UNALIGNED_LOAD
-    #if (EIGEN_COMP_MSVC==1600)
-    // NOTE Some version of MSVC10 generates bad code when using _mm_loadu_ps
-    // (i.e., it does not generate an unaligned load!!
-    __m128 res = _mm_loadl_pi(_mm_set1_ps(0.0f), (const __m64*)(from));
-    res = _mm_loadh_pi(res, (const __m64*)(from+2));
-    return res;
-    #else
     return _mm_loadu_ps(from);
-    #endif
   }
 #else
 // NOTE: with the code below, MSVC's compiler crashes!
diff --git a/Eigen/src/Core/functors/NullaryFunctors.h b/Eigen/src/Core/functors/NullaryFunctors.h
index 8e43266..0293a99 100644
--- a/Eigen/src/Core/functors/NullaryFunctors.h
+++ b/Eigen/src/Core/functors/NullaryFunctors.h
@@ -154,7 +154,7 @@
 
 // For unreliable compilers, let's specialize the has_*ary_operator
 // helpers so that at least built-in nullary functors work fine.
-#if !( (EIGEN_COMP_MSVC>1600) || (EIGEN_GNUC_AT_LEAST(4,8)) || (EIGEN_COMP_ICC>=1600))
+#if !( EIGEN_COMP_MSVC || EIGEN_COMP_GNUC || (EIGEN_COMP_ICC>=1600))
 template<typename Scalar,typename IndexType>
 struct has_nullary_operator<scalar_constant_op<Scalar>,IndexType> { enum { value = 1}; };
 template<typename Scalar,typename IndexType>
diff --git a/Eigen/src/Core/products/GeneralBlockPanelKernel.h b/Eigen/src/Core/products/GeneralBlockPanelKernel.h
index 89e999b..938f5fb 100644
--- a/Eigen/src/Core/products/GeneralBlockPanelKernel.h
+++ b/Eigen/src/Core/products/GeneralBlockPanelKernel.h
@@ -442,7 +442,7 @@
     nr = 4,
 
     // register block size along the M direction (currently, this one cannot be modified)
-    default_mr = (EIGEN_PLAIN_ENUM_MIN(16,NumberOfRegisters)/2/nr)*LhsPacketSize,
+    default_mr = (plain_enum_min(16, NumberOfRegisters)/2/nr)*LhsPacketSize,
 #if defined(EIGEN_HAS_SINGLE_INSTRUCTION_MADD) && !defined(EIGEN_VECTORIZE_ALTIVEC) && !defined(EIGEN_VECTORIZE_VSX) \
     && ((!EIGEN_COMP_MSVC) || (EIGEN_COMP_MSVC>=1914))
     // we assume 16 registers or more
@@ -571,7 +571,7 @@
     // we assume 16 registers
     mr = 3*LhsPacketSize,
 #else
-    mr = (EIGEN_PLAIN_ENUM_MIN(16,NumberOfRegisters)/2/nr)*LhsPacketSize,
+    mr = (plain_enum_min(16, NumberOfRegisters)/2/nr)*LhsPacketSize,
 #endif
 
     LhsProgress = LhsPacketSize,
@@ -954,7 +954,7 @@
     NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS,
     // FIXME: should depend on NumberOfRegisters
     nr = 4,
-    mr = (EIGEN_PLAIN_ENUM_MIN(16,NumberOfRegisters)/2/nr)*ResPacketSize,
+    mr = (plain_enum_min(16, NumberOfRegisters)/2/nr)*ResPacketSize,
 
     LhsProgress = ResPacketSize,
     RhsProgress = 1
diff --git a/Eigen/src/Core/products/GeneralMatrixMatrix.h b/Eigen/src/Core/products/GeneralMatrixMatrix.h
index 007c71e..df64232 100644
--- a/Eigen/src/Core/products/GeneralMatrixMatrix.h
+++ b/Eigen/src/Core/products/GeneralMatrixMatrix.h
@@ -422,7 +422,7 @@
   typedef typename internal::remove_all<ActualRhsType>::type ActualRhsTypeCleaned;
 
   enum {
-    MaxDepthAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(Lhs::MaxColsAtCompileTime,Rhs::MaxRowsAtCompileTime)
+    MaxDepthAtCompileTime = min_size_prefer_fixed(Lhs::MaxColsAtCompileTime, Rhs::MaxRowsAtCompileTime)
   };
 
   typedef generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,CoeffBasedProductMode> lazyproduct;
diff --git a/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h b/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h
index fc6f838..465294b 100644
--- a/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h
+++ b/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h
@@ -144,7 +144,7 @@
   typedef typename Traits::ResScalar ResScalar;
 
   enum {
-    BlockSize  = meta_least_common_multiple<EIGEN_PLAIN_ENUM_MAX(mr,nr),EIGEN_PLAIN_ENUM_MIN(mr,nr)>::ret
+    BlockSize  = meta_least_common_multiple<plain_enum_max(mr, nr), plain_enum_min(mr,nr)>::ret
   };
   void operator()(ResScalar* _res, Index resIncr, Index resStride, const LhsScalar* blockA, const RhsScalar* blockB, Index size, Index depth, const ResScalar& alpha)
   {
diff --git a/Eigen/src/Core/products/SelfadjointMatrixMatrix.h b/Eigen/src/Core/products/SelfadjointMatrixMatrix.h
index 504fa0c..f6fdbca 100644
--- a/Eigen/src/Core/products/SelfadjointMatrixMatrix.h
+++ b/Eigen/src/Core/products/SelfadjointMatrixMatrix.h
@@ -314,10 +314,10 @@
     const Scalar& alpha, level3_blocking<Scalar,Scalar>& blocking)
   {
     product_selfadjoint_matrix<Scalar, Index,
-      EIGEN_LOGICAL_XOR(RhsSelfAdjoint,RhsStorageOrder==RowMajor) ? ColMajor : RowMajor,
-      RhsSelfAdjoint, NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(RhsSelfAdjoint,ConjugateRhs),
-      EIGEN_LOGICAL_XOR(LhsSelfAdjoint,LhsStorageOrder==RowMajor) ? ColMajor : RowMajor,
-      LhsSelfAdjoint, NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(LhsSelfAdjoint,ConjugateLhs),
+      logical_xor(RhsSelfAdjoint,RhsStorageOrder==RowMajor) ? ColMajor : RowMajor,
+      RhsSelfAdjoint, NumTraits<Scalar>::IsComplex && logical_xor(RhsSelfAdjoint, ConjugateRhs),
+      logical_xor(LhsSelfAdjoint,LhsStorageOrder==RowMajor) ? ColMajor : RowMajor,
+      LhsSelfAdjoint, NumTraits<Scalar>::IsComplex && logical_xor(LhsSelfAdjoint, ConjugateLhs),
       ColMajor,ResInnerStride>
       ::run(cols, rows,  rhs, rhsStride,  lhs, lhsStride,  res, resIncr, resStride,  alpha, blocking);
   }
@@ -523,10 +523,10 @@
     BlockingType blocking(lhs.rows(), rhs.cols(), lhs.cols(), 1, false);
 
     internal::product_selfadjoint_matrix<Scalar, Index,
-      EIGEN_LOGICAL_XOR(LhsIsUpper,internal::traits<Lhs>::Flags &RowMajorBit) ? RowMajor : ColMajor, LhsIsSelfAdjoint,
-      NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(LhsIsUpper,bool(LhsBlasTraits::NeedToConjugate)),
-      EIGEN_LOGICAL_XOR(RhsIsUpper,internal::traits<Rhs>::Flags &RowMajorBit) ? RowMajor : ColMajor, RhsIsSelfAdjoint,
-      NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(RhsIsUpper,bool(RhsBlasTraits::NeedToConjugate)),
+      internal::logical_xor(LhsIsUpper, internal::traits<Lhs>::Flags &RowMajorBit) ? RowMajor : ColMajor, LhsIsSelfAdjoint,
+      NumTraits<Scalar>::IsComplex && internal::logical_xor(LhsIsUpper, bool(LhsBlasTraits::NeedToConjugate)),
+      internal::logical_xor(RhsIsUpper, internal::traits<Rhs>::Flags &RowMajorBit) ? RowMajor : ColMajor, RhsIsSelfAdjoint,
+      NumTraits<Scalar>::IsComplex && internal::logical_xor(RhsIsUpper, bool(RhsBlasTraits::NeedToConjugate)),
       internal::traits<Dest>::Flags&RowMajorBit  ? RowMajor : ColMajor,
       Dest::InnerStrideAtCompileTime>
       ::run(
diff --git a/Eigen/src/Core/products/SelfadjointMatrixVector.h b/Eigen/src/Core/products/SelfadjointMatrixVector.h
index 3176398..086638e 100644
--- a/Eigen/src/Core/products/SelfadjointMatrixVector.h
+++ b/Eigen/src/Core/products/SelfadjointMatrixVector.h
@@ -57,12 +57,12 @@
     FirstTriangular = IsRowMajor == IsLower
   };
 
-  conj_helper<Scalar,Scalar,NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(ConjugateLhs,  IsRowMajor), ConjugateRhs> cj0;
-  conj_helper<Scalar,Scalar,NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(ConjugateLhs, !IsRowMajor), ConjugateRhs> cj1;
+  conj_helper<Scalar,Scalar,NumTraits<Scalar>::IsComplex && logical_xor(ConjugateLhs,  IsRowMajor), ConjugateRhs> cj0;
+  conj_helper<Scalar,Scalar,NumTraits<Scalar>::IsComplex && logical_xor(ConjugateLhs, !IsRowMajor), ConjugateRhs> cj1;
   conj_helper<RealScalar,Scalar,false, ConjugateRhs> cjd;
 
-  conj_helper<Packet,Packet,NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(ConjugateLhs,  IsRowMajor), ConjugateRhs> pcj0;
-  conj_helper<Packet,Packet,NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(ConjugateLhs, !IsRowMajor), ConjugateRhs> pcj1;
+  conj_helper<Packet,Packet,NumTraits<Scalar>::IsComplex && logical_xor(ConjugateLhs,  IsRowMajor), ConjugateRhs> pcj0;
+  conj_helper<Packet,Packet,NumTraits<Scalar>::IsComplex && logical_xor(ConjugateLhs, !IsRowMajor), ConjugateRhs> pcj1;
 
   Scalar cjAlpha = ConjugateRhs ? numext::conj(alpha) : alpha;
 
@@ -183,7 +183,7 @@
   {
     typedef typename Dest::Scalar ResScalar;
     typedef typename Rhs::Scalar RhsScalar;
-    typedef Map<Matrix<ResScalar,Dynamic,1>, EIGEN_PLAIN_ENUM_MIN(AlignedMax,internal::packet_traits<ResScalar>::size)> MappedDest;
+    typedef Map<Matrix<ResScalar,Dynamic,1>, plain_enum_min(AlignedMax,internal::packet_traits<ResScalar>::size)> MappedDest;
     
     eigen_assert(dest.rows()==a_lhs.rows() && dest.cols()==a_rhs.cols());
 
diff --git a/Eigen/src/Core/products/TriangularMatrixMatrix.h b/Eigen/src/Core/products/TriangularMatrixMatrix.h
index 60d4b05..5b8ca12 100644
--- a/Eigen/src/Core/products/TriangularMatrixMatrix.h
+++ b/Eigen/src/Core/products/TriangularMatrixMatrix.h
@@ -91,7 +91,7 @@
   
   typedef gebp_traits<Scalar,Scalar> Traits;
   enum {
-    SmallPanelWidth   = 2 * EIGEN_PLAIN_ENUM_MAX(Traits::mr,Traits::nr),
+    SmallPanelWidth   = 2 * plain_enum_max(Traits::mr, Traits::nr),
     IsLower = (Mode&Lower) == Lower,
     SetDiag = (Mode&(ZeroDiag|UnitDiag)) ? 0 : 1
   };
@@ -249,7 +249,7 @@
 {
   typedef gebp_traits<Scalar,Scalar> Traits;
   enum {
-    SmallPanelWidth   = EIGEN_PLAIN_ENUM_MAX(Traits::mr,Traits::nr),
+    SmallPanelWidth   = plain_enum_max(Traits::mr, Traits::nr),
     IsLower = (Mode&Lower) == Lower,
     SetDiag = (Mode&(ZeroDiag|UnitDiag)) ? 0 : 1
   };
diff --git a/Eigen/src/Core/products/TriangularMatrixVector.h b/Eigen/src/Core/products/TriangularMatrixVector.h
index 754345f..c6d5afa 100644
--- a/Eigen/src/Core/products/TriangularMatrixVector.h
+++ b/Eigen/src/Core/products/TriangularMatrixVector.h
@@ -218,7 +218,7 @@
     typedef internal::blas_traits<Rhs> RhsBlasTraits;
     typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType;
     
-    typedef Map<Matrix<ResScalar,Dynamic,1>, EIGEN_PLAIN_ENUM_MIN(AlignedMax,internal::packet_traits<ResScalar>::size)> MappedDest;
+    typedef Map<Matrix<ResScalar,Dynamic,1>, plain_enum_min(AlignedMax,internal::packet_traits<ResScalar>::size)> MappedDest;
 
     typename internal::add_const_on_value_type<ActualLhsType>::type actualLhs = LhsBlasTraits::extract(lhs);
     typename internal::add_const_on_value_type<ActualRhsType>::type actualRhs = RhsBlasTraits::extract(rhs);
diff --git a/Eigen/src/Core/products/TriangularSolverMatrix.h b/Eigen/src/Core/products/TriangularSolverMatrix.h
index 0abc468..520cfc9 100644
--- a/Eigen/src/Core/products/TriangularSolverMatrix.h
+++ b/Eigen/src/Core/products/TriangularSolverMatrix.h
@@ -63,7 +63,7 @@
     typedef gebp_traits<Scalar,Scalar> Traits;
 
     enum {
-      SmallPanelWidth   = EIGEN_PLAIN_ENUM_MAX(Traits::mr,Traits::nr),
+      SmallPanelWidth   = plain_enum_max(Traits::mr, Traits::nr),
       IsLower = (Mode&Lower) == Lower
     };
 
@@ -216,7 +216,7 @@
     typedef gebp_traits<Scalar,Scalar> Traits;
     enum {
       RhsStorageOrder   = TriStorageOrder,
-      SmallPanelWidth   = EIGEN_PLAIN_ENUM_MAX(Traits::mr,Traits::nr),
+      SmallPanelWidth   = plain_enum_max(Traits::mr, Traits::nr),
       IsLower = (Mode&Lower) == Lower
     };
 
diff --git a/Eigen/src/Core/util/ConfigureVectorization.h b/Eigen/src/Core/util/ConfigureVectorization.h
index 16ca3ef..ba2049b 100644
--- a/Eigen/src/Core/util/ConfigureVectorization.h
+++ b/Eigen/src/Core/util/ConfigureVectorization.h
@@ -30,27 +30,13 @@
  *
  * If we made alignment depend on whether or not EIGEN_VECTORIZE is defined, it would be impossible to link
  * vectorized and non-vectorized code.
- * 
- * FIXME: this code can be cleaned up once we switch to proper C++11 only.
  */
 #if (defined EIGEN_CUDACC)
   #define EIGEN_ALIGN_TO_BOUNDARY(n) __align__(n)
   #define EIGEN_ALIGNOF(x) __alignof(x)
-#elif EIGEN_HAS_ALIGNAS
+#else
   #define EIGEN_ALIGN_TO_BOUNDARY(n) alignas(n)
   #define EIGEN_ALIGNOF(x) alignof(x)
-#elif EIGEN_COMP_GNUC || EIGEN_COMP_PGI || EIGEN_COMP_IBM || EIGEN_COMP_ARM
-  #define EIGEN_ALIGN_TO_BOUNDARY(n) __attribute__((aligned(n)))
-  #define EIGEN_ALIGNOF(x) __alignof(x)
-#elif EIGEN_COMP_MSVC
-  #define EIGEN_ALIGN_TO_BOUNDARY(n) __declspec(align(n))
-  #define EIGEN_ALIGNOF(x) __alignof(x)
-#elif EIGEN_COMP_SUNCC
-  // FIXME not sure about this one:
-  #define EIGEN_ALIGN_TO_BOUNDARY(n) __attribute__((aligned(n)))
-  #define EIGEN_ALIGNOF(x) __alignof(x)
-#else
-  #error Please tell me what is the equivalent of alignas(n) and alignof(x) for your compiler
 #endif
 
 // If the user explicitly disable vectorization, then we also disable alignment
@@ -105,11 +91,6 @@
   // try to keep heap alignment even when we have to disable static alignment.
   #if EIGEN_COMP_GNUC && !(EIGEN_ARCH_i386_OR_x86_64 || EIGEN_ARCH_ARM_OR_ARM64 || EIGEN_ARCH_PPC || EIGEN_ARCH_IA64 || EIGEN_ARCH_MIPS)
   #define EIGEN_GCC_AND_ARCH_DOESNT_WANT_STACK_ALIGNMENT 1
-  #elif EIGEN_ARCH_ARM_OR_ARM64 && EIGEN_COMP_GNUC_STRICT && EIGEN_GNUC_AT_MOST(4, 6)
-  // Old versions of GCC on ARM, at least 4.4, were once seen to have buggy static alignment support.
-  // Not sure which version fixed it, hopefully it doesn't affect 4.7, which is still somewhat in use.
-  // 4.8 and newer seem definitely unaffected.
-  #define EIGEN_GCC_AND_ARCH_DOESNT_WANT_STACK_ALIGNMENT 1
   #else
   #define EIGEN_GCC_AND_ARCH_DOESNT_WANT_STACK_ALIGNMENT 0
   #endif
@@ -200,14 +181,12 @@
 // removed as gcc 4.1 and msvc 2008 are not supported anyways.
 #if EIGEN_COMP_MSVC
   #include <malloc.h> // for _aligned_malloc -- need it regardless of whether vectorization is enabled
-  #if (EIGEN_COMP_MSVC >= 1500) // 2008 or later
-    // a user reported that in 64-bit mode, MSVC doesn't care to define _M_IX86_FP.
-    #if (defined(_M_IX86_FP) && (_M_IX86_FP >= 2)) || EIGEN_ARCH_x86_64
-      #define EIGEN_SSE2_ON_MSVC_2008_OR_LATER
-    #endif
+  // a user reported that in 64-bit mode, MSVC doesn't care to define _M_IX86_FP.
+  #if (defined(_M_IX86_FP) && (_M_IX86_FP >= 2)) || EIGEN_ARCH_x86_64
+    #define EIGEN_SSE2_ON_MSVC_2008_OR_LATER
   #endif
 #else
-  #if (defined __SSE2__) && ( (!EIGEN_COMP_GNUC) || EIGEN_COMP_ICC || EIGEN_GNUC_AT_LEAST(4,2) )
+  #if (defined __SSE2__) && ( (!EIGEN_COMP_GNUC) || EIGEN_COMP_ICC || EIGEN_COMP_GNUC )
     #define EIGEN_SSE2_ON_NON_MSVC_BUT_NOT_OLD_GCC
   #endif
 #endif
diff --git a/Eigen/src/Core/util/ForwardDeclarations.h b/Eigen/src/Core/util/ForwardDeclarations.h
index 6b0ac50..2f33599 100644
--- a/Eigen/src/Core/util/ForwardDeclarations.h
+++ b/Eigen/src/Core/util/ForwardDeclarations.h
@@ -53,20 +53,9 @@
 
 template<typename Scalar_, int Rows_, int Cols_,
          int Options_ = AutoAlign |
-#if EIGEN_GNUC_AT(3,4)
-    // workaround a bug in at least gcc 3.4.6
-    // the innermost ?: ternary operator is misparsed. We write it slightly
-    // differently and this makes gcc 3.4.6 happy, but it's ugly.
-    // The error would only show up with EIGEN_DEFAULT_TO_ROW_MAJOR is defined
-    // (when EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION is RowMajor)
-                          ( (Rows_==1 && Cols_!=1) ? Eigen::RowMajor
-                          : !(Cols_==1 && Rows_!=1) ?  EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION
-                          : Eigen::ColMajor ),
-#else
                           ( (Rows_==1 && Cols_!=1) ? Eigen::RowMajor
                           : (Cols_==1 && Rows_!=1) ? Eigen::ColMajor
                           : EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION ),
-#endif
          int MaxRows_ = Rows_,
          int MaxCols_ = Cols_
 > class Matrix;
@@ -246,20 +235,9 @@
 // Array module
 template<typename Scalar_, int Rows_, int Cols_,
          int Options_ = AutoAlign |
-#if EIGEN_GNUC_AT(3,4)
-    // workaround a bug in at least gcc 3.4.6
-    // the innermost ?: ternary operator is misparsed. We write it slightly
-    // differently and this makes gcc 3.4.6 happy, but it's ugly.
-    // The error would only show up with EIGEN_DEFAULT_TO_ROW_MAJOR is defined
-    // (when EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION is RowMajor)
-                          ( (Rows_==1 && Cols_!=1) ? Eigen::RowMajor
-                          : !(Cols_==1 && Rows_!=1) ?  EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION
-                          : Eigen::ColMajor ),
-#else
                           ( (Rows_==1 && Cols_!=1) ? Eigen::RowMajor
                           : (Cols_==1 && Rows_!=1) ? Eigen::ColMajor
                           : EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION ),
-#endif
          int MaxRows_ = Rows_, int MaxCols_ = Cols_> class Array;
 template<typename ConditionMatrixType, typename ThenMatrixType, typename ElseMatrixType> class Select;
 template<typename MatrixType, typename BinaryOp, int Direction> class PartialReduxExpr;
diff --git a/Eigen/src/Core/util/IntegralConstant.h b/Eigen/src/Core/util/IntegralConstant.h
index 0a9990c..6a60e11 100644
--- a/Eigen/src/Core/util/IntegralConstant.h
+++ b/Eigen/src/Core/util/IntegralConstant.h
@@ -79,14 +79,10 @@
   template<int M>
   FixedInt<N&M> operator&( FixedInt<M>) const { return FixedInt<N&M>(); }
 
-#if EIGEN_HAS_CXX14_VARIABLE_TEMPLATES
   // Needed in C++14 to allow fix<N>():
   FixedInt operator() () const { return *this; }
 
   VariableAndFixedInt<N> operator() (int val) const { return VariableAndFixedInt<N>(val); }
-#else
-  FixedInt ( FixedInt<N> (*)() ) {}
-#endif
 
   FixedInt(std::integral_constant<int,N>) {}
 };
@@ -138,12 +134,6 @@
   static const int value = N;
 };
 
-#if !EIGEN_HAS_CXX14_VARIABLE_TEMPLATES
-template<int N,int Default> struct get_fixed_value<FixedInt<N> (*)(),Default> {
-  static const int value = N;
-};
-#endif
-
 template<int N,int Default> struct get_fixed_value<VariableAndFixedInt<N>,Default> {
   static const int value = N ;
 };
@@ -154,9 +144,6 @@
 };
 
 template<typename T> EIGEN_DEVICE_FUNC Index get_runtime_value(const T &x) { return x; }
-#if !EIGEN_HAS_CXX14_VARIABLE_TEMPLATES
-template<int N> EIGEN_DEVICE_FUNC Index get_runtime_value(FixedInt<N> (*)()) { return N; }
-#endif
 
 // Cleanup integer/FixedInt/VariableAndFixedInt/etc types:
 
@@ -166,11 +153,6 @@
 // Convert any integral type (e.g., short, int, unsigned int, etc.) to Eigen::Index
 template<typename T, int DynamicKey> struct cleanup_index_type<T,DynamicKey,typename internal::enable_if<internal::is_integral<T>::value>::type> { typedef Index type; };
 
-#if !EIGEN_HAS_CXX14_VARIABLE_TEMPLATES
-// In c++98/c++11, fix<N> is a pointer to function that we better cleanup to a true FixedInt<N>:
-template<int N, int DynamicKey> struct cleanup_index_type<FixedInt<N> (*)(), DynamicKey> { typedef FixedInt<N> type; };
-#endif
-
 // If VariableAndFixedInt does not match DynamicKey, then we turn it to a pure compile-time value:
 template<int N, int DynamicKey> struct cleanup_index_type<VariableAndFixedInt<N>, DynamicKey> { typedef FixedInt<N> type; };
 // If VariableAndFixedInt matches DynamicKey, then we turn it to a pure runtime-value (aka Index):
@@ -182,18 +164,8 @@
 
 #ifndef EIGEN_PARSED_BY_DOXYGEN
 
-#if EIGEN_HAS_CXX14_VARIABLE_TEMPLATES
 template<int N>
 static const internal::FixedInt<N> fix{};
-#else
-template<int N>
-inline internal::FixedInt<N> fix() { return internal::FixedInt<N>(); }
-
-// The generic typename T is mandatory. Otherwise, a code like fix<N> could refer to either the function above or this next overload.
-// This way a code like fix<N> can only refer to the previous function.
-template<int N,typename T>
-inline internal::VariableAndFixedInt<N> fix(T val) { return internal::VariableAndFixedInt<N>(internal::convert_index<int>(val)); }
-#endif
 
 #else // EIGEN_PARSED_BY_DOXYGEN
 
diff --git a/Eigen/src/Core/util/Macros.h b/Eigen/src/Core/util/Macros.h
index c5cbf73..0aafb9e 100644
--- a/Eigen/src/Core/util/Macros.h
+++ b/Eigen/src/Core/util/Macros.h
@@ -133,10 +133,6 @@
 
 // For the record, here is a table summarizing the possible values for EIGEN_COMP_MSVC:
 //  name        ver   MSC_VER
-//  2008         9      1500
-//  2010        10      1600
-//  2012        11      1700
-//  2013        12      1800
 //  2015        14      1900
 //  "15"        15      1900
 //  2017-14.1   15.0    1910
@@ -144,6 +140,9 @@
 //  2017-14.12  15.5    1912
 //  2017-14.13  15.6    1913
 //  2017-14.14  15.7    1914
+//  2017        15.8    1915
+//  2017        15.9    1916
+//  2019 RTW    16.0    1920
 
 /// \internal EIGEN_COMP_MSVC_LANG set to _MSVC_LANG if the compiler is Microsoft Visual C++, 0 otherwise.
 #if defined(_MSVC_LANG)
@@ -585,16 +584,6 @@
 # define __has_feature(x) 0
 #endif
 
-// Some old compilers do not support template specializations like:
-// template<typename T,int N> void foo(const T x[N]);
-#if !(   EIGEN_COMP_CLANG && (   (EIGEN_COMP_CLANG<309)                                                       \
-                              || (defined(__apple_build_version__) && (__apple_build_version__ < 9000000)))  \
-      || EIGEN_COMP_GNUC_STRICT && EIGEN_COMP_GNUC<49)
-#define EIGEN_HAS_STATIC_ARRAY_TEMPLATE 1
-#else
-#define EIGEN_HAS_STATIC_ARRAY_TEMPLATE 0
-#endif
-
 // The macro EIGEN_CPLUSPLUS is a replacement for __cplusplus/_MSVC_LANG that
 // works for both platforms, indicating the C++ standard version number.
 //
@@ -627,30 +616,17 @@
   #define EIGEN_COMP_CXXVER 03
 #endif
 
-#ifndef EIGEN_HAS_CXX14_VARIABLE_TEMPLATES
-  #if defined(__cpp_variable_templates) && __cpp_variable_templates >= 201304 && EIGEN_MAX_CPP_VER>=14
-    #define EIGEN_HAS_CXX14_VARIABLE_TEMPLATES 1
-  #else
-    #define EIGEN_HAS_CXX14_VARIABLE_TEMPLATES 0
-  #endif
-#endif
-
-
 // The macros EIGEN_HAS_CXX?? defines a rough estimate of available c++ features
 // but in practice we should not rely on them but rather on the availability of
 // individual features as defined later.
 // This is why there is no EIGEN_HAS_CXX17.
-// FIXME: get rid of EIGEN_HAS_CXX14.
-#if EIGEN_MAX_CPP_VER<11 || EIGEN_COMP_CXXVER<11 || (EIGEN_COMP_MSVC && EIGEN_COMP_MSVC < 1700) || (EIGEN_COMP_ICC && EIGEN_COMP_ICC < 1400)
+#if EIGEN_MAX_CPP_VER<14 || EIGEN_COMP_CXXVER<14 || (EIGEN_COMP_MSVC && EIGEN_COMP_MSVC < 1900) || \
+  (EIGEN_COMP_ICC && EIGEN_COMP_ICC < 1500) || (EIGEN_COMP_NVCC && EIGEN_COMP_NVCC < 80000) ||     \
+  (EIGEN_COMP_CLANG && ((EIGEN_COMP_CLANG<309) || (defined(__apple_build_version__) && (__apple_build_version__ < 9000000)))) || \
+  (EIGEN_COMP_GNUC_STRICT && EIGEN_COMP_GNUC<51)
 #error This compiler appears to be too old to be supported by Eigen
 #endif
 
-#if EIGEN_MAX_CPP_VER>=14 && EIGEN_COMP_CXXVER>=14
-#define EIGEN_HAS_CXX14 1
-#else
-#define EIGEN_HAS_CXX14 0
-#endif
-
 // Does the compiler support C99?
 // Need to include <cmath> to make sure _GLIBCXX_USE_C99 gets defined
 #include <cmath>
@@ -658,7 +634,7 @@
 #if ((defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901))       \
   || (defined(__GNUC__) && defined(_GLIBCXX_USE_C99)) \
   || (defined(_LIBCPP_VERSION) && !defined(_MSC_VER)) \
-  || (EIGEN_COMP_MSVC >= 1900) || defined(SYCL_DEVICE_ONLY))
+  || (EIGEN_COMP_MSVC) || defined(SYCL_DEVICE_ONLY))
   #define EIGEN_HAS_C99_MATH 1
 #else
   #define EIGEN_HAS_C99_MATH 0
@@ -694,57 +670,14 @@
 #endif
 #endif
 
-#ifndef EIGEN_HAS_ALIGNAS
-#if (     __has_feature(cxx_alignas)            \
-        ||  EIGEN_HAS_CXX14                       \
-        || (EIGEN_COMP_MSVC >= 1800)              \
-        || (EIGEN_GNUC_AT_LEAST(4,8))             \
-        || (EIGEN_COMP_CLANG>=305)                \
-        || (EIGEN_COMP_ICC>=1500)                 \
-        || (EIGEN_COMP_PGI>=1500)                 \
-        || (EIGEN_COMP_SUNCC>=0x5130))
-#define EIGEN_HAS_ALIGNAS 1
-#else
-#define EIGEN_HAS_ALIGNAS 0
-#endif
-#endif
-
-// Does the compiler support type_traits?
-// - full support of type traits was added only to GCC 5.1.0.
-// - 20150626 corresponds to the last release of 4.x libstdc++
-#ifndef EIGEN_HAS_TYPE_TRAITS
-#if ((!EIGEN_COMP_GNUC_STRICT) || EIGEN_GNUC_AT_LEAST(5, 1)) \
-  && ((!defined(__GLIBCXX__))   || __GLIBCXX__ > 20150626)
-#define EIGEN_HAS_TYPE_TRAITS 1
-#define EIGEN_INCLUDE_TYPE_TRAITS
-#else
-#define EIGEN_HAS_TYPE_TRAITS 0
-#endif
-#endif
-
-// Does the compiler support variadic templates?
-#ifndef EIGEN_HAS_VARIADIC_TEMPLATES
-#if (!defined(__NVCC__) || !EIGEN_ARCH_ARM_OR_ARM64 || (EIGEN_COMP_NVCC >= 80000) )
-    // ^^ Disable the use of variadic templates when compiling with versions of nvcc older than 8.0 on ARM devices:
-    //    this prevents nvcc from crashing when compiling Eigen on Tegra X1
-#define EIGEN_HAS_VARIADIC_TEMPLATES 1
-#elif defined(SYCL_DEVICE_ONLY)
-#define EIGEN_HAS_VARIADIC_TEMPLATES 1
-#else
-#define EIGEN_HAS_VARIADIC_TEMPLATES 0
-#endif
-#endif
-
 // Does the compiler fully support const expressions? (as in c++14)
 #ifndef EIGEN_HAS_CONSTEXPR
   #if defined(EIGEN_CUDACC)
   // Const expressions are supported provided that c++11 is enabled and we're using either clang or nvcc 7.5 or above
-    #if EIGEN_MAX_CPP_VER>=14 && (EIGEN_COMP_CLANG || EIGEN_COMP_NVCC >= 70500)
+    #if (EIGEN_COMP_CLANG || EIGEN_COMP_NVCC >= 70500)
       #define EIGEN_HAS_CONSTEXPR 1
     #endif
-  #elif EIGEN_MAX_CPP_VER>=14 && (__has_feature(cxx_relaxed_constexpr) || (EIGEN_COMP_CXXVER >= 14) || \
-    (EIGEN_GNUC_AT_LEAST(4,8) && (EIGEN_COMP_CXXVER >= 11)) || \
-    (EIGEN_COMP_CLANG >= 306 && (EIGEN_COMP_CXXVER >= 11)))
+  #else
     #define EIGEN_HAS_CONSTEXPR 1
   #endif
 
@@ -763,8 +696,7 @@
 // Does the compiler support C++11 math?
 // Let's be conservative and enable the default C++11 implementation only if we are sure it exists
 #ifndef EIGEN_HAS_CXX11_MATH
-  #if ((EIGEN_COMP_CXXVER > 11) || (EIGEN_COMP_GNUC_STRICT || EIGEN_COMP_CLANG || EIGEN_COMP_MSVC || EIGEN_COMP_ICC)  \
-      && (EIGEN_ARCH_i386_OR_x86_64) && (EIGEN_OS_GNULINUX || EIGEN_OS_WIN_STRICT || EIGEN_OS_MAC))
+  #if (EIGEN_ARCH_i386_OR_x86_64 && (EIGEN_OS_GNULINUX || EIGEN_OS_WIN_STRICT || EIGEN_OS_MAC))
     #define EIGEN_HAS_CXX11_MATH 1
   #else
     #define EIGEN_HAS_CXX11_MATH 0
@@ -848,15 +780,11 @@
 #endif
 #endif
 
-// EIGEN_ALWAYS_INLINE is the stronget, it has the effect of making the function inline and adding every possible
+// EIGEN_ALWAYS_INLINE is the strongest, it has the effect of making the function inline and adding every possible
 // attribute to maximize inlining. This should only be used when really necessary: in particular,
 // it uses __attribute__((always_inline)) on GCC, which most of the time is useless and can severely harm compile times.
 // FIXME with the always_inline attribute,
-// gcc 3.4.x and 4.1 reports the following compilation error:
-//   Eval.h:91: sorry, unimplemented: inlining failed in call to 'const Eigen::Eval<Derived> Eigen::MatrixBase<Scalar, Derived>::eval() const'
-//    : function body not available
-//   See also bug 1367
-#if EIGEN_GNUC_AT_LEAST(4,2) && !defined(SYCL_DEVICE_ONLY)
+#if EIGEN_COMP_GNUC && !defined(SYCL_DEVICE_ONLY)
 #define EIGEN_ALWAYS_INLINE __attribute__((always_inline)) inline
 #else
 #define EIGEN_ALWAYS_INLINE EIGEN_STRONG_INLINE
@@ -1037,14 +965,8 @@
       // General, NEON.
       // Clang doesn't like "r",
       //    error: non-trivial scalar-to-vector conversion, possible invalid
-      //           constraint for vector type
-      // GCC < 5 doesn't like "g",
-      //    error: 'asm' operand requires impossible reload
-      #if EIGEN_COMP_GNUC_STRICT && EIGEN_GNUC_AT_MOST(5, 0)
-        #define EIGEN_OPTIMIZATION_BARRIER(X)  __asm__  ("" : "+r,w" (X));
-      #else
-        #define EIGEN_OPTIMIZATION_BARRIER(X)  __asm__  ("" : "+g,w" (X));
-      #endif
+      //           constraint for vector type
+      #define EIGEN_OPTIMIZATION_BARRIER(X)  __asm__  ("" : "+g,w" (X));
     #elif EIGEN_ARCH_i386_OR_x86_64
       // General, SSE.
       #define EIGEN_OPTIMIZATION_BARRIER(X)  __asm__  ("" : "+g,x" (X));
@@ -1098,8 +1020,8 @@
   #define EIGEN_USING_STD(FUNC) using std::FUNC;
 #endif
 
-#if EIGEN_COMP_MSVC_STRICT && (EIGEN_COMP_MSVC < 1900 || EIGEN_COMP_NVCC)
-  // For older MSVC versions, as well as when compiling with NVCC, using the base operator is necessary,
+#if EIGEN_COMP_MSVC_STRICT && EIGEN_COMP_NVCC
+  // When compiling with NVCC, using the base operator is necessary,
   //   otherwise we get duplicate definition errors
   // For later MSVC versions, we require explicit operator= definition, otherwise we get
   //   use of implicitly deleted operator errors.
@@ -1188,35 +1110,6 @@
   typedef typename Base::PacketScalar PacketScalar;
 
 
-#define EIGEN_PLAIN_ENUM_MIN(a,b) (((int)a <= (int)b) ? (int)a : (int)b)
-#define EIGEN_PLAIN_ENUM_MAX(a,b) (((int)a >= (int)b) ? (int)a : (int)b)
-
-// EIGEN_SIZE_MIN_PREFER_DYNAMIC gives the min between compile-time sizes. 0 has absolute priority, followed by 1,
-// followed by Dynamic, followed by other finite values. The reason for giving Dynamic the priority over
-// finite values is that min(3, Dynamic) should be Dynamic, since that could be anything between 0 and 3.
-#define EIGEN_SIZE_MIN_PREFER_DYNAMIC(a,b) (((int)a == 0 || (int)b == 0) ? 0 \
-                           : ((int)a == 1 || (int)b == 1) ? 1 \
-                           : ((int)a == Dynamic || (int)b == Dynamic) ? Dynamic \
-                           : ((int)a <= (int)b) ? (int)a : (int)b)
-
-// EIGEN_SIZE_MIN_PREFER_FIXED is a variant of EIGEN_SIZE_MIN_PREFER_DYNAMIC comparing MaxSizes. The difference is that finite values
-// now have priority over Dynamic, so that min(3, Dynamic) gives 3. Indeed, whatever the actual value is
-// (between 0 and 3), it is not more than 3.
-#define EIGEN_SIZE_MIN_PREFER_FIXED(a,b)  (((int)a == 0 || (int)b == 0) ? 0 \
-                           : ((int)a == 1 || (int)b == 1) ? 1 \
-                           : ((int)a == Dynamic && (int)b == Dynamic) ? Dynamic \
-                           : ((int)a == Dynamic) ? (int)b \
-                           : ((int)b == Dynamic) ? (int)a \
-                           : ((int)a <= (int)b) ? (int)a : (int)b)
-
-// see EIGEN_SIZE_MIN_PREFER_DYNAMIC. No need for a separate variant for MaxSizes here.
-#define EIGEN_SIZE_MAX(a,b) (((int)a == Dynamic || (int)b == Dynamic) ? Dynamic \
-                           : ((int)a >= (int)b) ? (int)a : (int)b)
-
-#define EIGEN_LOGICAL_XOR(a,b) (((a) || (b)) && !((a) && (b)))
-
-#define EIGEN_IMPLIES(a,b) (!(a) || (b))
-
 #if EIGEN_HAS_BUILTIN(__builtin_expect) || EIGEN_COMP_GNUC
 #define EIGEN_PREDICT_FALSE(x) (__builtin_expect(x, false))
 #define EIGEN_PREDICT_TRUE(x) (__builtin_expect(false || (x), true))
@@ -1255,16 +1148,9 @@
   CwiseBinaryOp<EIGEN_CAT(EIGEN_CAT(internal::scalar_,OPNAME),_op)<SCALAR,typename internal::traits<EXPR>::Scalar>, \
                 const typename internal::plain_constant_type<EXPR,SCALAR>::type, const EXPR>
 
-// Workaround for MSVC 2010 (see ML thread "patch with compile for for MSVC 2010")
-#if EIGEN_COMP_MSVC_STRICT && (EIGEN_COMP_MSVC_STRICT<=1600)
-#define EIGEN_MSVC10_WORKAROUND_BINARYOP_RETURN_TYPE(X) typename internal::enable_if<true,X>::type
-#else
-#define EIGEN_MSVC10_WORKAROUND_BINARYOP_RETURN_TYPE(X) X
-#endif
-
 #define EIGEN_MAKE_SCALAR_BINARY_OP_ONTHERIGHT(METHOD,OPNAME) \
   template <typename T> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \
-  EIGEN_MSVC10_WORKAROUND_BINARYOP_RETURN_TYPE(const EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(Derived,typename internal::promote_scalar_arg<Scalar EIGEN_COMMA T EIGEN_COMMA EIGEN_SCALAR_BINARY_SUPPORTED(OPNAME,Scalar,T)>::type,OPNAME))\
+  const EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(Derived,typename internal::promote_scalar_arg<Scalar EIGEN_COMMA T EIGEN_COMMA EIGEN_SCALAR_BINARY_SUPPORTED(OPNAME,Scalar,T)>::type,OPNAME)\
   (METHOD)(const T& scalar) const { \
     typedef typename internal::promote_scalar_arg<Scalar,T,EIGEN_SCALAR_BINARY_SUPPORTED(OPNAME,Scalar,T)>::type PromotedT; \
     return EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(Derived,PromotedT,OPNAME)(derived(), \
@@ -1273,7 +1159,7 @@
 
 #define EIGEN_MAKE_SCALAR_BINARY_OP_ONTHELEFT(METHOD,OPNAME) \
   template <typename T> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE friend \
-  EIGEN_MSVC10_WORKAROUND_BINARYOP_RETURN_TYPE(const EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(typename internal::promote_scalar_arg<Scalar EIGEN_COMMA T EIGEN_COMMA EIGEN_SCALAR_BINARY_SUPPORTED(OPNAME,T,Scalar)>::type,Derived,OPNAME)) \
+  const EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(typename internal::promote_scalar_arg<Scalar EIGEN_COMMA T EIGEN_COMMA EIGEN_SCALAR_BINARY_SUPPORTED(OPNAME,T,Scalar)>::type,Derived,OPNAME) \
   (METHOD)(const T& scalar, const StorageBaseType& matrix) { \
     typedef typename internal::promote_scalar_arg<Scalar,T,EIGEN_SCALAR_BINARY_SUPPORTED(OPNAME,T,Scalar)>::type PromotedT; \
     return EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(PromotedT,Derived,OPNAME)( \
@@ -1311,14 +1197,12 @@
 #endif
 
 
-#define EIGEN_INCLUDE_TYPE_TRAITS
 #define EIGEN_NOEXCEPT noexcept
 #define EIGEN_NOEXCEPT_IF(x) noexcept(x)
 #define EIGEN_NO_THROW noexcept(true)
 #define EIGEN_EXCEPTION_SPEC(X) noexcept(false)
 
 
-#if EIGEN_HAS_VARIADIC_TEMPLATES
 // The all function is used to enable a variadic version of eigen_assert which can take a parameter pack as its input.
 namespace Eigen {
 namespace internal {
@@ -1330,7 +1214,6 @@
 
 }
 }
-#endif
 
 // provide override and final specifiers if they are available:
 #define EIGEN_OVERRIDE override
diff --git a/Eigen/src/Core/util/Memory.h b/Eigen/src/Core/util/Memory.h
index 5211d04..dc6cec7 100644
--- a/Eigen/src/Core/util/Memory.h
+++ b/Eigen/src/Core/util/Memory.h
@@ -936,7 +936,7 @@
          __asm__ __volatile__ ("cpuid": "=a" (abcd[0]), "=b" (abcd[1]), "=c" (abcd[2]), "=d" (abcd[3]) : "0" (func), "2" (id) );
 #    endif
 #  elif EIGEN_COMP_MSVC
-#    if (EIGEN_COMP_MSVC > 1500) && EIGEN_ARCH_i386_OR_x86_64
+#    if EIGEN_ARCH_i386_OR_x86_64
 #      define EIGEN_CPUID(abcd,func,id) __cpuidex((int*)abcd,func,id)
 #    endif
 #  endif
diff --git a/Eigen/src/Core/util/Meta.h b/Eigen/src/Core/util/Meta.h
index f2c4dfe..b641f97 100755
--- a/Eigen/src/Core/util/Meta.h
+++ b/Eigen/src/Core/util/Meta.h
@@ -371,7 +371,7 @@
 #endif
 
 // C++14 integer/index_sequence.
-#if defined(__cpp_lib_integer_sequence) && __cpp_lib_integer_sequence >= 201304L && EIGEN_MAX_CPP_VER >= 14
+#if defined(__cpp_lib_integer_sequence) && __cpp_lib_integer_sequence >= 201304L
 
 using std::integer_sequence;
 using std::make_integer_sequence;
@@ -579,6 +579,82 @@
 
 } // end namespace numext
 
+namespace internal {
+/// \internal Returns true if its argument is of integer or enum type.
+/// FIXME this has the same purpose as `is_valid_index_type` in XprHelper.h
+template<typename A>
+constexpr bool is_int_or_enum_v = std::is_enum<A>::value || std::is_integral<A>::value;
+
+/// \internal Gets the minimum of two values which may be integers or enums
+template<typename A, typename B>
+inline constexpr int plain_enum_min(A a, B b) {
+  static_assert(is_int_or_enum_v<A>, "Argument a must be an integer or enum");
+  static_assert(is_int_or_enum_v<B>, "Argument b must be an integer or enum");
+  return ((int) a <= (int) b) ? (int) a : (int) b;
+}
+
+/// \internal Gets the maximum of two values which may be integers or enums
+template<typename A, typename B>
+inline constexpr int plain_enum_max(A a, B b) {
+  static_assert(is_int_or_enum_v<A>, "Argument a must be an integer or enum");
+  static_assert(is_int_or_enum_v<B>, "Argument b must be an integer or enum");
+  return ((int) a >= (int) b) ? (int) a : (int) b;
+}
+
+/**
+ * \internal
+ *  `min_size_prefer_dynamic` gives the min between compile-time sizes. 0 has absolute priority, followed by 1,
+ *  followed by Dynamic, followed by other finite values. The reason for giving Dynamic the priority over
+ *  finite values is that min(3, Dynamic) should be Dynamic, since that could be anything between 0 and 3.
+ */
+template<typename A, typename B>
+inline constexpr int min_size_prefer_dynamic(A a, B b) {
+  static_assert(is_int_or_enum_v<A>, "Argument a must be an integer or enum");
+  static_assert(is_int_or_enum_v<B>, "Argument b must be an integer or enum");
+  if ((int) a == 0 || (int) b == 0) return 0;
+  if ((int) a == 1 || (int) b == 1) return 1;
+  if ((int) a == Dynamic || (int) b == Dynamic) return Dynamic;
+  return plain_enum_min(a, b);
+}
+
+/**
+ * \internal
+ *  min_size_prefer_fixed is a variant of `min_size_prefer_dynamic` comparing MaxSizes. The difference is that finite values
+ *  now have priority over Dynamic, so that min(3, Dynamic) gives 3. Indeed, whatever the actual value is
+ *  (between 0 and 3), it is not more than 3.
+ */
+template<typename A, typename B>
+inline constexpr int min_size_prefer_fixed(A a, B b) {
+  static_assert(is_int_or_enum_v<A>, "Argument a must be an integer or enum");
+  static_assert(is_int_or_enum_v<B>, "Argument b must be an integer or enum");
+  if ((int) a == 0 || (int) b == 0) return 0;
+  if ((int) a == 1 || (int) b == 1) return 1;
+  if ((int) a == Dynamic && (int) b == Dynamic) return Dynamic;
+  if ((int) a == Dynamic) return (int) b;
+  if ((int) b == Dynamic) return (int) a;
+  return plain_enum_min(a, b);
+}
+
+/// \internal see `min_size_prefer_fixed`. No need for a separate variant for MaxSizes here.
+template<typename A, typename B>
+inline constexpr int max_size_prefer_dynamic(A a, B b) {
+  static_assert(is_int_or_enum_v<A>, "Argument a must be an integer or enum");
+  static_assert(is_int_or_enum_v<B>, "Argument b must be an integer or enum");
+  if ((int) a == Dynamic || (int) b == Dynamic) return Dynamic;
+  return plain_enum_max(a, b);
+}
+
+/// \internal Calculate logical XOR at compile time
+inline constexpr bool logical_xor(bool a, bool b) {
+  return (a || b) && !(a && b);
+}
+
+/// \internal Calculate logical IMPLIES at compile time
+inline constexpr bool check_implication(bool a, bool b) {
+  return !a || b;
+}
+} // end namespace internal
+
 } // end namespace Eigen
 
 #endif // EIGEN_META_H
diff --git a/Eigen/src/Core/util/SymbolicIndex.h b/Eigen/src/Core/util/SymbolicIndex.h
index 9135a4a..533890d 100644
--- a/Eigen/src/Core/util/SymbolicIndex.h
+++ b/Eigen/src/Core/util/SymbolicIndex.h
@@ -92,10 +92,8 @@
   template<typename T>
   Index eval(const T& values) const { return derived().eval_impl(values); }
 
-#if EIGEN_HAS_CXX14
   template<typename... Types>
   Index eval(Types&&... values) const { return derived().eval_impl(std::make_tuple(values...)); }
-#endif
 
   NegateExpr<Derived> operator-() const { return NegateExpr<Derived>(derived()); }
 
@@ -143,34 +141,6 @@
   friend QuotientExpr<ValueExpr<internal::FixedInt<N> >,Derived> operator/(internal::FixedInt<N>, const BaseExpr& b)
   { return QuotientExpr<ValueExpr<internal::FixedInt<N> > ,Derived>(ValueExpr<internal::FixedInt<N> >(),b.derived()); }
 
-#if (!EIGEN_HAS_CXX14)
-  template<int N>
-  AddExpr<Derived,ValueExpr<internal::FixedInt<N> > > operator+(internal::FixedInt<N> (*)()) const
-  { return AddExpr<Derived,ValueExpr<internal::FixedInt<N> > >(derived(), ValueExpr<internal::FixedInt<N> >()); }
-  template<int N>
-  AddExpr<Derived,ValueExpr<internal::FixedInt<-N> > > operator-(internal::FixedInt<N> (*)()) const
-  { return AddExpr<Derived,ValueExpr<internal::FixedInt<-N> > >(derived(), ValueExpr<internal::FixedInt<-N> >()); }
-  template<int N>
-  ProductExpr<Derived,ValueExpr<internal::FixedInt<N> > > operator*(internal::FixedInt<N> (*)()) const
-  { return ProductExpr<Derived,ValueExpr<internal::FixedInt<N> > >(derived(),ValueExpr<internal::FixedInt<N> >()); }
-  template<int N>
-  QuotientExpr<Derived,ValueExpr<internal::FixedInt<N> > > operator/(internal::FixedInt<N> (*)()) const
-  { return QuotientExpr<Derived,ValueExpr<internal::FixedInt<N> > >(derived(),ValueExpr<internal::FixedInt<N> >()); }
-
-  template<int N>
-  friend AddExpr<Derived,ValueExpr<internal::FixedInt<N> > > operator+(internal::FixedInt<N> (*)(), const BaseExpr& b)
-  { return AddExpr<Derived,ValueExpr<internal::FixedInt<N> > >(b.derived(), ValueExpr<internal::FixedInt<N> >()); }
-  template<int N>
-  friend AddExpr<NegateExpr<Derived>,ValueExpr<internal::FixedInt<N> > > operator-(internal::FixedInt<N> (*)(), const BaseExpr& b)
-  { return AddExpr<NegateExpr<Derived>,ValueExpr<internal::FixedInt<N> > >(-b.derived(), ValueExpr<internal::FixedInt<N> >()); }
-  template<int N>
-  friend ProductExpr<ValueExpr<internal::FixedInt<N> >,Derived> operator*(internal::FixedInt<N> (*)(), const BaseExpr& b)
-  { return ProductExpr<ValueExpr<internal::FixedInt<N> >,Derived>(ValueExpr<internal::FixedInt<N> >(),b.derived()); }
-  template<int N>
-  friend QuotientExpr<ValueExpr<internal::FixedInt<N> >,Derived> operator/(internal::FixedInt<N> (*)(), const BaseExpr& b)
-  { return QuotientExpr<ValueExpr<internal::FixedInt<N> > ,Derived>(ValueExpr<internal::FixedInt<N> >(),b.derived()); }
-#endif
-
 
   template<typename OtherDerived>
   AddExpr<Derived,OtherDerived> operator+(const BaseExpr<OtherDerived> &b) const
@@ -232,11 +202,9 @@
 
   Index eval_impl(const SymbolValue<Tag> &values) const { return values.value(); }
 
-#if EIGEN_HAS_CXX14
   // C++14 versions suitable for multiple symbols
   template<typename... Types>
   Index eval_impl(const std::tuple<Types...>& values) const { return std::get<SymbolValue<Tag> >(values).value(); }
-#endif
 };
 
 template<typename Arg0>
diff --git a/Eigen/src/Core/util/XprHelper.h b/Eigen/src/Core/util/XprHelper.h
index 5740510..a1314e9 100644
--- a/Eigen/src/Core/util/XprHelper.h
+++ b/Eigen/src/Core/util/XprHelper.h
@@ -14,7 +14,7 @@
 // just a workaround because GCC seems to not really like empty structs
 // FIXME: gcc 4.3 generates bad code when strict-aliasing is enabled
 // so currently we simply disable this optimization for gcc 4.3
-#if EIGEN_COMP_GNUC && !EIGEN_GNUC_AT(4,3)
+#if EIGEN_COMP_GNUC
   #define EIGEN_EMPTY_STRUCT_CTOR(X) \
     EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE X() {} \
     EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE X(const X& ) {}
@@ -39,15 +39,7 @@
 // true if T can be considered as an integral index (i.e., and integral type or enum)
 template<typename T> struct is_valid_index_type
 {
-  enum { value =
-#if EIGEN_HAS_TYPE_TRAITS
-    internal::is_integral<T>::value || std::is_enum<T>::value
-#elif EIGEN_COMP_MSVC
-    internal::is_integral<T>::value || __is_enum(T)
-#else
-    // without C++11, we use is_convertible to Index instead of is_integral in order to treat enums as Index.
-    internal::is_convertible<T,Index>::value && !internal::is_same<T,float>::value && !is_same<T,double>::value
-#endif
+  enum { value = internal::is_integral<T>::value || std::is_enum<T>::value
   };
 };
 
@@ -654,8 +646,9 @@
 template<typename ExpressionType, typename Scalar = typename ExpressionType::Scalar>
 struct plain_diag_type
 {
-  enum { diag_size = EIGEN_SIZE_MIN_PREFER_DYNAMIC(ExpressionType::RowsAtCompileTime, ExpressionType::ColsAtCompileTime),
-         max_diag_size = EIGEN_SIZE_MIN_PREFER_FIXED(ExpressionType::MaxRowsAtCompileTime, ExpressionType::MaxColsAtCompileTime)
+  enum { diag_size = internal::min_size_prefer_dynamic(ExpressionType::RowsAtCompileTime, ExpressionType::ColsAtCompileTime),
+         max_diag_size = min_size_prefer_fixed(ExpressionType::MaxRowsAtCompileTime,
+                                               ExpressionType::MaxColsAtCompileTime)
   };
   typedef Matrix<Scalar, diag_size, 1, ExpressionType::PlainObject::Options & ~RowMajor, max_diag_size, 1> MatrixDiagType;
   typedef Array<Scalar, diag_size, 1, ExpressionType::PlainObject::Options & ~RowMajor, max_diag_size, 1> ArrayDiagType;
diff --git a/Eigen/src/Geometry/Transform.h b/Eigen/src/Geometry/Transform.h
index 27ea962..ca9e34f 100644
--- a/Eigen/src/Geometry/Transform.h
+++ b/Eigen/src/Geometry/Transform.h
@@ -319,12 +319,12 @@
     check_template_params();
     // prevent conversions as:
     // Affine | AffineCompact | Isometry = Projective
-    EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(OtherMode==int(Projective), Mode==int(Projective)),
+    EIGEN_STATIC_ASSERT(internal::check_implication(OtherMode==int(Projective), Mode==int(Projective)),
                         YOU_PERFORMED_AN_INVALID_TRANSFORMATION_CONVERSION)
 
     // prevent conversions as:
     // Isometry = Affine | AffineCompact
-    EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(OtherMode==int(Affine)||OtherMode==int(AffineCompact), Mode!=int(Isometry)),
+    EIGEN_STATIC_ASSERT(internal::check_implication(OtherMode==int(Affine)||OtherMode==int(AffineCompact), Mode!=int(Isometry)),
                         YOU_PERFORMED_AN_INVALID_TRANSFORMATION_CONVERSION)
 
     enum { ModeIsAffineCompact = Mode == int(AffineCompact),
@@ -1404,7 +1404,7 @@
     Dim = TransformType::Dim,
     HDim = TransformType::HDim,
     OtherRows = MatrixType::RowsAtCompileTime,
-    WorkingRows = EIGEN_PLAIN_ENUM_MIN(TransformMatrix::RowsAtCompileTime,HDim)
+    WorkingRows = plain_enum_min(TransformMatrix::RowsAtCompileTime, HDim)
   };
 
   typedef typename MatrixType::PlainObject ResultType;
diff --git a/Eigen/src/Geometry/Umeyama.h b/Eigen/src/Geometry/Umeyama.h
index 5f79b3a..08ef930 100644
--- a/Eigen/src/Geometry/Umeyama.h
+++ b/Eigen/src/Geometry/Umeyama.h
@@ -34,7 +34,7 @@
 struct umeyama_transform_matrix_type
 {
   enum {
-    MinRowsAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(MatrixType::RowsAtCompileTime, OtherMatrixType::RowsAtCompileTime),
+    MinRowsAtCompileTime = internal::min_size_prefer_dynamic(MatrixType::RowsAtCompileTime, OtherMatrixType::RowsAtCompileTime),
 
     // When possible we want to choose some small fixed size value since the result
     // is likely to fit on the stack. So here, EIGEN_SIZE_MIN_PREFER_DYNAMIC is not what we want.
@@ -104,7 +104,7 @@
   EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename internal::traits<OtherDerived>::Scalar>::value),
     YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
 
-  enum { Dimension = EIGEN_SIZE_MIN_PREFER_DYNAMIC(Derived::RowsAtCompileTime, OtherDerived::RowsAtCompileTime) };
+  enum { Dimension = internal::min_size_prefer_dynamic(Derived::RowsAtCompileTime, OtherDerived::RowsAtCompileTime) };
 
   typedef Matrix<Scalar, Dimension, 1> VectorType;
   typedef Matrix<Scalar, Dimension, Dimension> MatrixType;
diff --git a/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h b/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h
index a1408a6..ce2b688 100644
--- a/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h
+++ b/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h
@@ -208,7 +208,7 @@
                       &&  (!NumTraits<Scalar>::IsComplex)
     };
     typedef typename internal::conditional<TransposeInput,Transpose<const ActualMatrixType>, ActualMatrixType const&>::type RowMajorWrapper;
-    EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(MatrixWrapper::MatrixFree,UpLo==(Lower|Upper)),MATRIX_FREE_CONJUGATE_GRADIENT_IS_COMPATIBLE_WITH_UPPER_UNION_LOWER_MODE_ONLY);
+    EIGEN_STATIC_ASSERT(internal::check_implication(MatrixWrapper::MatrixFree,UpLo==(Lower|Upper)),MATRIX_FREE_CONJUGATE_GRADIENT_IS_COMPATIBLE_WITH_UPPER_UNION_LOWER_MODE_ONLY);
     typedef typename internal::conditional<UpLo==(Lower|Upper),
                                            RowMajorWrapper,
                                            typename MatrixWrapper::template ConstSelfAdjointViewReturnType<UpLo>::Type
diff --git a/Eigen/src/Jacobi/Jacobi.h b/Eigen/src/Jacobi/Jacobi.h
index 55a94c2..6a533a06 100644
--- a/Eigen/src/Jacobi/Jacobi.h
+++ b/Eigen/src/Jacobi/Jacobi.h
@@ -474,7 +474,7 @@
   apply_rotation_in_the_plane_selector<
     Scalar,OtherScalar,
     VectorX::SizeAtCompileTime,
-    EIGEN_PLAIN_ENUM_MIN(evaluator<VectorX>::Alignment, evaluator<VectorY>::Alignment),
+    plain_enum_min(evaluator<VectorX>::Alignment, evaluator<VectorY>::Alignment),
     Vectorizable>::run(x,incrx,y,incry,size,c,s);
 }
 
diff --git a/Eigen/src/LU/FullPivLU.h b/Eigen/src/LU/FullPivLU.h
index 1dd0a4e..fce7c34 100644
--- a/Eigen/src/LU/FullPivLU.h
+++ b/Eigen/src/LU/FullPivLU.h
@@ -616,9 +616,9 @@
 {
   EIGEN_MAKE_KERNEL_HELPERS(FullPivLU<MatrixType_>)
 
-  enum { MaxSmallDimAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(
-            MatrixType::MaxColsAtCompileTime,
-            MatrixType::MaxRowsAtCompileTime)
+  enum { MaxSmallDimAtCompileTime = min_size_prefer_fixed(
+              MatrixType::MaxColsAtCompileTime,
+              MatrixType::MaxRowsAtCompileTime)
   };
 
   template<typename Dest> void evalTo(Dest& dst) const
@@ -702,9 +702,9 @@
 {
   EIGEN_MAKE_IMAGE_HELPERS(FullPivLU<MatrixType_>)
 
-  enum { MaxSmallDimAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(
-            MatrixType::MaxColsAtCompileTime,
-            MatrixType::MaxRowsAtCompileTime)
+  enum { MaxSmallDimAtCompileTime = min_size_prefer_fixed(
+              MatrixType::MaxColsAtCompileTime,
+              MatrixType::MaxRowsAtCompileTime)
   };
 
   template<typename Dest> void evalTo(Dest& dst) const
diff --git a/Eigen/src/LU/InverseImpl.h b/Eigen/src/LU/InverseImpl.h
index 9c1a605..050737b 100644
--- a/Eigen/src/LU/InverseImpl.h
+++ b/Eigen/src/LU/InverseImpl.h
@@ -311,7 +311,7 @@
     if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))
       dst.resize(dstRows, dstCols);
     
-    const int Size = EIGEN_PLAIN_ENUM_MIN(XprType::ColsAtCompileTime,DstXprType::ColsAtCompileTime);
+    const int Size = plain_enum_min(XprType::ColsAtCompileTime, DstXprType::ColsAtCompileTime);
     EIGEN_ONLY_USED_FOR_DEBUG(Size);
     eigen_assert(( (Size<=1) || (Size>4) || (extract_data(src.nestedExpression())!=extract_data(dst)))
               && "Aliasing problem detected in inverse(), you need to do inverse().eval() here.");
diff --git a/Eigen/src/LU/PartialPivLU.h b/Eigen/src/LU/PartialPivLU.h
index 4034745..aba4a67 100644
--- a/Eigen/src/LU/PartialPivLU.h
+++ b/Eigen/src/LU/PartialPivLU.h
@@ -514,7 +514,7 @@
   partial_lu_impl
     < typename MatrixType::Scalar, MatrixType::Flags&RowMajorBit?RowMajor:ColMajor,
       typename TranspositionType::StorageIndex,
-      EIGEN_SIZE_MIN_PREFER_FIXED(MatrixType::RowsAtCompileTime,MatrixType::ColsAtCompileTime)>
+      internal::min_size_prefer_fixed(MatrixType::RowsAtCompileTime, MatrixType::ColsAtCompileTime)>
     ::blocked_lu(lu.rows(), lu.cols(), &lu.coeffRef(0,0), lu.outerStride(), &row_transpositions.coeffRef(0), nb_transpositions);
 }
 
diff --git a/Eigen/src/LU/PartialPivLU_LAPACKE.h b/Eigen/src/LU/PartialPivLU_LAPACKE.h
index 2f244f6..b636442 100644
--- a/Eigen/src/LU/PartialPivLU_LAPACKE.h
+++ b/Eigen/src/LU/PartialPivLU_LAPACKE.h
@@ -39,44 +39,55 @@
 
 namespace internal {
 
-/** \internal Specialization for the data types supported by LAPACKe */
+namespace lapacke_helpers {
+// -------------------------------------------------------------------------------------------------------------------
+//        Generic lapacke partial lu implementation that converts arguments and dispatches to the function above
+// -------------------------------------------------------------------------------------------------------------------
 
-#define EIGEN_LAPACKE_LU_PARTPIV(EIGTYPE, LAPACKE_TYPE, LAPACKE_PREFIX) \
-template<int StorageOrder> \
-struct partial_lu_impl<EIGTYPE, StorageOrder, lapack_int> \
-{ \
-  /* \internal performs the LU decomposition in-place of the matrix represented */ \
-  static lapack_int blocked_lu(Index rows, Index cols, EIGTYPE* lu_data, Index luStride, lapack_int* row_transpositions, lapack_int& nb_transpositions, lapack_int maxBlockSize=256) \
-  { \
-    EIGEN_UNUSED_VARIABLE(maxBlockSize);\
-    lapack_int matrix_order, first_zero_pivot; \
-    lapack_int m, n, lda, *ipiv, info; \
-    EIGTYPE* a; \
-/* Set up parameters for ?getrf */ \
-    matrix_order = StorageOrder==RowMajor ? LAPACK_ROW_MAJOR : LAPACK_COL_MAJOR; \
-    lda = convert_index<lapack_int>(luStride); \
-    a = lu_data; \
-    ipiv = row_transpositions; \
-    m = convert_index<lapack_int>(rows); \
-    n = convert_index<lapack_int>(cols); \
-    nb_transpositions = 0; \
-\
-    info = LAPACKE_##LAPACKE_PREFIX##getrf( matrix_order, m, n, (LAPACKE_TYPE*)a, lda, ipiv ); \
-\
-    for(int i=0;i<m;i++) { ipiv[i]--; if (ipiv[i]!=i) nb_transpositions++; } \
-\
-    eigen_assert(info >= 0); \
-/* something should be done with nb_transpositions */ \
-\
-    first_zero_pivot = info; \
-    return first_zero_pivot; \
-  } \
+template<typename Scalar, int StorageOrder>
+struct lapacke_partial_lu {
+  /** \internal performs the LU decomposition in-place of the matrix represented */
+  static lapack_int blocked_lu(Index rows, Index cols, Scalar* lu_data, Index luStride, lapack_int* row_transpositions,
+  lapack_int& nb_transpositions, lapack_int maxBlockSize=256)
+  {
+    EIGEN_UNUSED_VARIABLE(maxBlockSize);
+    // Set up parameters for getrf
+    lapack_int matrix_order = StorageOrder==RowMajor ? LAPACK_ROW_MAJOR : LAPACK_COL_MAJOR;
+    lapack_int lda = to_lapack(luStride);
+    Scalar* a = lu_data;
+    lapack_int* ipiv = row_transpositions;
+    lapack_int m = to_lapack(rows);
+    lapack_int n = to_lapack(cols);
+    nb_transpositions = 0;
+
+    lapack_int info = getrf(matrix_order, m, n, to_lapack(a), lda, ipiv );
+    eigen_assert(info >= 0);
+
+    for(int i=0; i<m; i++) {
+      ipiv[i]--;
+      if (ipiv[i] != i) nb_transpositions++;
+    }
+    lapack_int first_zero_pivot = info;
+    return first_zero_pivot;
+  }
 };
+} // end namespace lapacke_helpers
 
-EIGEN_LAPACKE_LU_PARTPIV(double, double, d)
-EIGEN_LAPACKE_LU_PARTPIV(float, float, s)
-EIGEN_LAPACKE_LU_PARTPIV(dcomplex, lapack_complex_double, z)
-EIGEN_LAPACKE_LU_PARTPIV(scomplex, lapack_complex_float,  c)
+/*
+ * Here, we just put the generic implementation from lapacke_partial_lu into a partial specialization of the partial_lu_impl
+ * type. This specialization is more specialized than the generic implementations that Eigen implements, so if the
+ * Scalar type matches they will be chosen.
+ */
+#define EIGEN_LAPACKE_PARTIAL_LU(EIGTYPE) \
+template<int StorageOrder>                \
+struct partial_lu_impl<EIGTYPE, StorageOrder, lapack_int, Dynamic> : public lapacke_helpers::lapacke_partial_lu<EIGTYPE, StorageOrder> {};
+
+EIGEN_LAPACKE_PARTIAL_LU(double)
+EIGEN_LAPACKE_PARTIAL_LU(float)
+EIGEN_LAPACKE_PARTIAL_LU(std::complex<double>)
+EIGEN_LAPACKE_PARTIAL_LU(std::complex<float>)
+
+#undef EIGEN_LAPACKE_PARTIAL_LU
 
 } // end namespace internal
 
diff --git a/Eigen/src/QR/FullPivHouseholderQR.h b/Eigen/src/QR/FullPivHouseholderQR.h
index 0a61f22..dcb9e1a 100644
--- a/Eigen/src/QR/FullPivHouseholderQR.h
+++ b/Eigen/src/QR/FullPivHouseholderQR.h
@@ -76,8 +76,8 @@
     typedef internal::FullPivHouseholderQRMatrixQReturnType<MatrixType> MatrixQReturnType;
     typedef typename internal::plain_diag_type<MatrixType>::type HCoeffsType;
     typedef Matrix<StorageIndex, 1,
-                   EIGEN_SIZE_MIN_PREFER_DYNAMIC(ColsAtCompileTime,RowsAtCompileTime), RowMajor, 1,
-                   EIGEN_SIZE_MIN_PREFER_FIXED(MaxColsAtCompileTime,MaxRowsAtCompileTime)> IntDiagSizeVectorType;
+                   internal::min_size_prefer_dynamic(ColsAtCompileTime,RowsAtCompileTime), RowMajor, 1,
+                   internal::min_size_prefer_fixed(MaxColsAtCompileTime, MaxRowsAtCompileTime)> IntDiagSizeVectorType;
     typedef PermutationMatrix<ColsAtCompileTime, MaxColsAtCompileTime> PermutationType;
     typedef typename internal::plain_row_type<MatrixType>::type RowVectorType;
     typedef typename internal::plain_col_type<MatrixType>::type ColVectorType;
diff --git a/Eigen/src/QR/HouseholderQR_LAPACKE.h b/Eigen/src/QR/HouseholderQR_LAPACKE.h
index ef67760..57c2f6a 100644
--- a/Eigen/src/QR/HouseholderQR_LAPACKE.h
+++ b/Eigen/src/QR/HouseholderQR_LAPACKE.h
@@ -40,28 +40,35 @@
 
 namespace internal {
 
-/** \internal Specialization for the data types supported by LAPACKe */
+namespace lapacke_helpers {
 
-#define EIGEN_LAPACKE_QR_NOPIV(EIGTYPE, LAPACKE_TYPE, LAPACKE_PREFIX) \
-template<typename MatrixQR, typename HCoeffs> \
-struct householder_qr_inplace_blocked<MatrixQR, HCoeffs, EIGTYPE, true> \
-{ \
-  static void run(MatrixQR& mat, HCoeffs& hCoeffs, Index = 32, \
-      typename MatrixQR::Scalar* = 0) \
-  { \
-    lapack_int m = (lapack_int) mat.rows(); \
-    lapack_int n = (lapack_int) mat.cols(); \
-    lapack_int lda = (lapack_int) mat.outerStride(); \
-    lapack_int matrix_order = (MatrixQR::IsRowMajor) ? LAPACK_ROW_MAJOR : LAPACK_COL_MAJOR; \
-    LAPACKE_##LAPACKE_PREFIX##geqrf( matrix_order, m, n, (LAPACKE_TYPE*)mat.data(), lda, (LAPACKE_TYPE*)hCoeffs.data()); \
-    hCoeffs.adjointInPlace(); \
-  } \
+template<typename MatrixQR, typename HCoeffs>
+struct lapacke_hqr
+{
+  static void run(MatrixQR& mat, HCoeffs& hCoeffs, Index = 32, typename MatrixQR::Scalar* = 0)
+  {
+    lapack_int m = to_lapack(mat.rows());
+    lapack_int n = to_lapack(mat.cols());
+    lapack_int lda = to_lapack(mat.outerStride());
+    lapack_int matrix_order = lapack_storage_of(mat);
+    geqrf(matrix_order, m, n, to_lapack(mat.data()), lda, to_lapack(hCoeffs.data()));
+    hCoeffs.adjointInPlace();
+  }
 };
 
-EIGEN_LAPACKE_QR_NOPIV(double, double, d)
-EIGEN_LAPACKE_QR_NOPIV(float, float, s)
-EIGEN_LAPACKE_QR_NOPIV(dcomplex, lapack_complex_double, z)
-EIGEN_LAPACKE_QR_NOPIV(scomplex, lapack_complex_float, c)
+}
+
+/** \internal Specialization for the data types supported by LAPACKe */
+#define EIGEN_LAPACKE_HH_QR(EIGTYPE) \
+template<typename MatrixQR, typename HCoeffs> \
+struct householder_qr_inplace_blocked<MatrixQR, HCoeffs, EIGTYPE, true> : public lapacke_helpers::lapacke_hqr<MatrixQR, HCoeffs> {};
+
+EIGEN_LAPACKE_HH_QR(double)
+EIGEN_LAPACKE_HH_QR(float)
+EIGEN_LAPACKE_HH_QR(std::complex<double>)
+EIGEN_LAPACKE_HH_QR(std::complex<float>)
+
+#undef EIGEN_LAPACKE_HH_QR
 
 } // end namespace internal
 
diff --git a/Eigen/src/SVD/BDCSVD.h b/Eigen/src/SVD/BDCSVD.h
index 8bb30cd..0ad453f 100644
--- a/Eigen/src/SVD/BDCSVD.h
+++ b/Eigen/src/SVD/BDCSVD.h
@@ -93,10 +93,10 @@
   enum {
     RowsAtCompileTime = MatrixType::RowsAtCompileTime,
     ColsAtCompileTime = MatrixType::ColsAtCompileTime,
-    DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime, ColsAtCompileTime),
+    DiagSizeAtCompileTime = internal::min_size_prefer_dynamic(RowsAtCompileTime, ColsAtCompileTime),
     MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
     MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
-    MaxDiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(MaxRowsAtCompileTime, MaxColsAtCompileTime),
+    MaxDiagSizeAtCompileTime = internal::min_size_prefer_fixed(MaxRowsAtCompileTime, MaxColsAtCompileTime),
     MatrixOptions = MatrixType::Options
   };
 
diff --git a/Eigen/src/SVD/JacobiSVD.h b/Eigen/src/SVD/JacobiSVD.h
index 91c95ec..e69d13a 100644
--- a/Eigen/src/SVD/JacobiSVD.h
+++ b/Eigen/src/SVD/JacobiSVD.h
@@ -499,10 +499,10 @@
     enum {
       RowsAtCompileTime = MatrixType::RowsAtCompileTime,
       ColsAtCompileTime = MatrixType::ColsAtCompileTime,
-      DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime,ColsAtCompileTime),
+      DiagSizeAtCompileTime = internal::min_size_prefer_dynamic(RowsAtCompileTime,ColsAtCompileTime),
       MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
       MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
-      MaxDiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(MaxRowsAtCompileTime,MaxColsAtCompileTime),
+      MaxDiagSizeAtCompileTime = internal::min_size_prefer_fixed(MaxRowsAtCompileTime,MaxColsAtCompileTime),
       MatrixOptions = MatrixType::Options
     };
 
@@ -638,7 +638,7 @@
   m_computeThinV = (computationOptions & ComputeThinV) != 0;
   eigen_assert(!(m_computeFullU && m_computeThinU) && "JacobiSVD: you can't ask for both full and thin U");
   eigen_assert(!(m_computeFullV && m_computeThinV) && "JacobiSVD: you can't ask for both full and thin V");
-  eigen_assert(EIGEN_IMPLIES(m_computeThinU || m_computeThinV, MatrixType::ColsAtCompileTime==Dynamic) &&
+  eigen_assert(internal::check_implication(m_computeThinU || m_computeThinV, MatrixType::ColsAtCompileTime==Dynamic) &&
               "JacobiSVD: thin U and V are only available when your matrix has a dynamic number of columns.");
   if (QRPreconditioner == FullPivHouseholderQRPreconditioner)
   {
diff --git a/Eigen/src/SVD/SVDBase.h b/Eigen/src/SVD/SVDBase.h
index 7ecaf21..1e58404 100644
--- a/Eigen/src/SVD/SVDBase.h
+++ b/Eigen/src/SVD/SVDBase.h
@@ -77,10 +77,10 @@
   enum {
     RowsAtCompileTime = MatrixType::RowsAtCompileTime,
     ColsAtCompileTime = MatrixType::ColsAtCompileTime,
-    DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime,ColsAtCompileTime),
+    DiagSizeAtCompileTime = internal::min_size_prefer_dynamic(RowsAtCompileTime,ColsAtCompileTime),
     MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
     MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
-    MaxDiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(MaxRowsAtCompileTime,MaxColsAtCompileTime),
+    MaxDiagSizeAtCompileTime = internal::min_size_prefer_fixed(MaxRowsAtCompileTime,MaxColsAtCompileTime),
     MatrixOptions = MatrixType::Options
   };
 
@@ -355,7 +355,7 @@
   m_computeThinV = (computationOptions & ComputeThinV) != 0;
   eigen_assert(!(m_computeFullU && m_computeThinU) && "SVDBase: you can't ask for both full and thin U");
   eigen_assert(!(m_computeFullV && m_computeThinV) && "SVDBase: you can't ask for both full and thin V");
-  eigen_assert(EIGEN_IMPLIES(m_computeThinU || m_computeThinV, MatrixType::ColsAtCompileTime==Dynamic) &&
+  eigen_assert(internal::check_implication(m_computeThinU || m_computeThinV, MatrixType::ColsAtCompileTime==Dynamic) &&
 	       "SVDBase: thin U and V are only available when your matrix has a dynamic number of columns.");
 
   m_diagSize = (std::min)(m_rows, m_cols);
diff --git a/Eigen/src/SparseCore/SparseDenseProduct.h b/Eigen/src/SparseCore/SparseDenseProduct.h
index 878a759..6f433fc 100644
--- a/Eigen/src/SparseCore/SparseDenseProduct.h
+++ b/Eigen/src/SparseCore/SparseDenseProduct.h
@@ -65,10 +65,18 @@
   
   static void processRow(const LhsEval& lhsEval, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha, Index i, Index col)
   {
-    typename Res::Scalar tmp(0);
-    for(LhsInnerIterator it(lhsEval,i); it ;++it)
-      tmp += it.value() * rhs.coeff(it.index(),col);
-    res.coeffRef(i,col) += alpha * tmp;
+    // Two accumulators, which breaks the dependency chain on the accumulator
+    // and allows more instruction-level parallelism in the following loop
+    typename Res::Scalar tmp_a(0);
+    typename Res::Scalar tmp_b(0);
+    for(LhsInnerIterator it(lhsEval,i); it ;++it) {
+      tmp_a += it.value() * rhs.coeff(it.index(), col);
+      ++it;
+      if(it) {
+        tmp_b += it.value() * rhs.coeff(it.index(), col);
+      }
+    }
+    res.coeffRef(i, col) += alpha * (tmp_a + tmp_b);
   }
   
 };
diff --git a/Eigen/src/SparseCore/SparseMatrix.h b/Eigen/src/SparseCore/SparseMatrix.h
index 9cb5d21..5d31c7d 100644
--- a/Eigen/src/SparseCore/SparseMatrix.h
+++ b/Eigen/src/SparseCore/SparseMatrix.h
@@ -288,10 +288,7 @@
     #else
     template<class SizesType>
     inline void reserve(const SizesType& reserveSizes, const typename SizesType::value_type& enableif =
-    #if (!EIGEN_COMP_MSVC) || (EIGEN_COMP_MSVC>=1500) // MSVC 2005 fails to compile with this typename
-        typename
-    #endif
-        SizesType::value_type())
+        typename SizesType::value_type())
     {
       EIGEN_UNUSED_VARIABLE(enableif);
       reserveInnerVectors(reserveSizes);
diff --git a/Eigen/src/SparseCore/SparseMatrixBase.h b/Eigen/src/SparseCore/SparseMatrixBase.h
index ee5eafc..fd6547f 100644
--- a/Eigen/src/SparseCore/SparseMatrixBase.h
+++ b/Eigen/src/SparseCore/SparseMatrixBase.h
@@ -139,8 +139,8 @@
     /** type of the equivalent dense matrix */
     typedef Matrix<Scalar,RowsAtCompileTime,ColsAtCompileTime> DenseMatrixType;
     /** type of the equivalent square matrix */
-    typedef Matrix<Scalar,EIGEN_SIZE_MAX(RowsAtCompileTime,ColsAtCompileTime),
-                          EIGEN_SIZE_MAX(RowsAtCompileTime,ColsAtCompileTime)> SquareMatrixType;
+    typedef Matrix<Scalar, internal::max_size_prefer_dynamic(RowsAtCompileTime, ColsAtCompileTime),
+                           internal::max_size_prefer_dynamic(RowsAtCompileTime, ColsAtCompileTime)> SquareMatrixType;
 
     inline const Derived& derived() const { return *static_cast<const Derived*>(this); }
     inline Derived& derived() { return *static_cast<Derived*>(this); }
diff --git a/Eigen/src/SparseCore/SparseUtil.h b/Eigen/src/SparseCore/SparseUtil.h
index 19b59d1..33049b9 100644
--- a/Eigen/src/SparseCore/SparseUtil.h
+++ b/Eigen/src/SparseCore/SparseUtil.h
@@ -65,10 +65,10 @@
 
 template<typename Lhs, typename Rhs> struct SparseSparseProductReturnType;
 template<typename Lhs, typename Rhs,
-         int InnerSize = EIGEN_SIZE_MIN_PREFER_FIXED(internal::traits<Lhs>::ColsAtCompileTime,internal::traits<Rhs>::RowsAtCompileTime)> struct DenseSparseProductReturnType;
+         int InnerSize = internal::min_size_prefer_fixed(internal::traits<Lhs>::ColsAtCompileTime, internal::traits<Rhs>::RowsAtCompileTime)> struct DenseSparseProductReturnType;
          
 template<typename Lhs, typename Rhs,
-         int InnerSize = EIGEN_SIZE_MIN_PREFER_FIXED(internal::traits<Lhs>::ColsAtCompileTime,internal::traits<Rhs>::RowsAtCompileTime)> struct SparseDenseProductReturnType;
+         int InnerSize = internal::min_size_prefer_fixed(internal::traits<Lhs>::ColsAtCompileTime, internal::traits<Rhs>::RowsAtCompileTime)> struct SparseDenseProductReturnType;
 template<typename MatrixType,int UpLo> class SparseSymmetricPermutationProduct;
 
 namespace internal {
diff --git a/Eigen/src/misc/lapacke_helpers.h b/Eigen/src/misc/lapacke_helpers.h
new file mode 100644
index 0000000..6fff863
--- /dev/null
+++ b/Eigen/src/misc/lapacke_helpers.h
@@ -0,0 +1,159 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2021 Erik Schultheis <erik.schultheis@aalto.fi>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_LAPACKE_HELPERS_H
+#define EIGEN_LAPACKE_HELPERS_H
+
+#include "./InternalHeaderCheck.h"
+
+#ifdef EIGEN_USE_MKL
+#include "mkl_lapacke.h"
+#else
+#include "lapacke.h"
+#endif
+
+namespace Eigen {
+namespace internal {
+/**
+ * \internal
+ * \brief Implementation details and helper functions for the lapacke glue code.
+ */
+namespace lapacke_helpers {
+
+// ---------------------------------------------------------------------------------------------------------------------
+//                  Translation from Eigen to Lapacke for types and constants
+// ---------------------------------------------------------------------------------------------------------------------
+
+// For complex numbers, the types in Eigen and Lapacke are different, but layout compatible.
+template<typename Scalar>
+struct translate_type_imp;
+template<>
+struct translate_type_imp<float> {
+    using type = float;
+};
+template<>
+struct translate_type_imp<double> {
+    using type = double;
+};
+template<>
+struct translate_type_imp<std::complex<double>> {
+    using type = lapack_complex_double;
+};
+template<>
+struct translate_type_imp<std::complex<float>> {
+    using type = lapack_complex_float;
+};
+
+/// Given an Eigen type, this is defined to be the corresponding, layout-compatible lapack type
+template<typename Scalar>
+using translated_type = typename translate_type_imp<Scalar>::type;
+
+/// These functions convert their arguments from Eigen to Lapack types
+/// This function performs conversion for any of the translations defined above.
+template<typename Source, typename Target=translated_type<Source>>
+auto to_lapack(Source value) { return static_cast<Target>(value); }
+
+/// This function performs conversions for pointer types corresponding to the translations above.
+/// This is valid because the translations are between layout-compatible types.
+template<typename Source, typename Target=translated_type<Source>>
+auto to_lapack(Source *value) { return reinterpret_cast<Target*>(value); }
+
+/// This function converts the Eigen Index to a lapack index, with possible range checks
+/// \sa internal::convert_index
+lapack_int to_lapack(Index index) {
+  return convert_index<lapack_int>(index);
+}
+
+/// translates storage order of the given Eigen object to the corresponding lapack constant
+template<typename Derived>
+EIGEN_CONSTEXPR lapack_int lapack_storage_of(const EigenBase<Derived> &) {
+  return Derived::IsRowMajor ? LAPACK_ROW_MAJOR : LAPACK_COL_MAJOR;
+}
+
+/// translate UpLo type to the corresponding letter code
+template<UpLoType mode> char translate_mode;
+template<> EIGEN_CONSTEXPR const char translate_mode<Lower> = 'L';
+template<> EIGEN_CONSTEXPR const char translate_mode<Upper> = 'U';
+
+
+// ---------------------------------------------------------------------------------------------------------------------
+//              Automatic generation of low-level wrappers
+// ---------------------------------------------------------------------------------------------------------------------
+
+/*!
+ * \internal
+ * \brief Helper type to facilitate the wrapping of raw LAPACKE functions for different types into a single, overloaded C++ function.
+ * This is achieved in combination with \ref EIGEN_MAKE_LAPACKE_WRAPPER
+ * \details This implementation works by providing an overloaded call function that just forwards its arguments to the
+ * underlying lapack function. Each of these overloads is enabled only if the call is actually well formed.
+ * Because these lapack functions take pointers to the underlying scalar type as arguments, even though the actual Scalars
+ * would be implicitly convertible, the pointers are not and therefore only a single overload can be valid at the same time.
+ * Thus, despite all functions taking fully generic `Args&&... args` as arguments, there is never any ambiguity.
+ */
+template<typename DoubleFn, typename SingleFn, typename DoubleCpxFn, typename SingleCpxFn>
+struct WrappingHelper {
+  // The naming of double, single, double complex and single complex is purely for readability
+  // and doesn't actually affect the workings of this class. In principle, the arguments can
+  // be supplied in any permuted order.
+  DoubleFn double_; SingleFn single_; DoubleCpxFn double_cpx_; SingleCpxFn single_cpx_;
+
+  template<typename... Args>
+  auto call(Args&&... args) -> decltype(double_(std::forward<Args>(args)...)) {
+    return double_(std::forward<Args>(args)...);
+  }
+
+  template<typename... Args>
+  auto call(Args&&... args) -> decltype(single_(std::forward<Args>(args)...)){
+    return single_(std::forward<Args>(args)...);
+  }
+
+  template<typename... Args>
+  auto call(Args&&... args) -> decltype(double_cpx_(std::forward<Args>(args)...)){
+    return double_cpx_(std::forward<Args>(args)...);
+  }
+
+  template<typename... Args>
+  auto call(Args&&... args) -> decltype(single_cpx_(std::forward<Args>(args)...)){
+    return single_cpx_(std::forward<Args>(args)...);
+  }
+};
+
+/** \internal Helper function that generates a `WrappingHelper` object with the given function pointers and
+ * invokes its `call` method, thus selecting one of the overloads.
+ * \sa EIGEN_MAKE_LAPACKE_WRAPPER
+ */
+template<typename DoubleFn, typename SingleFn, typename DoubleCpxFn, typename SingleCpxFn, typename... Args>
+auto call_wrapper(DoubleFn df, SingleFn sf, DoubleCpxFn dcf, SingleCpxFn scf, Args&&... args) {
+  WrappingHelper<DoubleFn, SingleFn, DoubleCpxFn, SingleCpxFn> helper{df, sf, dcf, scf};
+  return helper.call(std::forward<Args>(args)...);
+}
+
+/**
+ * \internal
+ * Generates a new function `Function` that dispatches to the corresponding LAPACKE_? prefixed functions.
+ * \sa WrappingHelper
+ */
+#define EIGEN_MAKE_LAPACKE_WRAPPER(FUNCTION) \
+template<typename... Args> \
+auto FUNCTION(Args&&... args) { return call_wrapper(LAPACKE_d##FUNCTION, LAPACKE_s##FUNCTION, LAPACKE_z##FUNCTION, LAPACKE_c##FUNCTION, std::forward<Args>(args)...); }
+
+// Now with this macro and the helper wrappers, we can generate the dispatch for all the lapacke functions that are
+// used in Eigen.
+// We define these here instead of in the files where they are used because this allows us to #undef the macro again
+// right here
+EIGEN_MAKE_LAPACKE_WRAPPER(potrf)
+EIGEN_MAKE_LAPACKE_WRAPPER(getrf)
+EIGEN_MAKE_LAPACKE_WRAPPER(geqrf)
+
+#undef EIGEN_MAKE_LAPACKE_WRAPPER
+}
+}
+}
+
+#endif // EIGEN_LAPACKE_HELPERS_H
\ No newline at end of file
diff --git a/Eigen/src/plugins/IndexedViewMethods.h b/Eigen/src/plugins/IndexedViewMethods.h
index 463602a..8acd87e 100644
--- a/Eigen/src/plugins/IndexedViewMethods.h
+++ b/Eigen/src/plugins/IndexedViewMethods.h
@@ -105,8 +105,6 @@
   return Base::operator()(internal::eval_expr_given_size(rowIndices,rows()),internal::eval_expr_given_size(colIndices,cols()));
 }
 
-#if EIGEN_HAS_STATIC_ARRAY_TEMPLATE
-
 // The following three overloads are needed to handle raw Index[N] arrays.
 
 template<typename RowIndicesT, std::size_t RowIndicesN, typename ColIndices>
@@ -133,7 +131,6 @@
                     (derived(), rowIndices, colIndices);
 }
 
-#endif // EIGEN_HAS_STATIC_ARRAY_TEMPLATE
 
 // Overloads for 1D vectors/arrays
 
@@ -178,8 +175,6 @@
   return Base::operator()(internal::eval_expr_given_size(id,size()));
 }
 
-#if EIGEN_HAS_STATIC_ARRAY_TEMPLATE
-
 template<typename IndicesT, std::size_t IndicesN>
 typename internal::enable_if<IsRowMajor,
   IndexedView<EIGEN_INDEXED_VIEW_METHOD_CONST Derived,IvcIndex,const IndicesT (&)[IndicesN]> >::type
@@ -200,8 +195,6 @@
             (derived(), indices, IvcIndex(0));
 }
 
-#endif // EIGEN_HAS_STATIC_ARRAY_TEMPLATE
-
 #undef EIGEN_INDEXED_VIEW_METHOD_CONST
 #undef EIGEN_INDEXED_VIEW_METHOD_TYPE
 
diff --git a/blas/PackedTriangularMatrixVector.h b/blas/PackedTriangularMatrixVector.h
index 0039536..cc2a9b8 100644
--- a/blas/PackedTriangularMatrixVector.h
+++ b/blas/PackedTriangularMatrixVector.h
@@ -31,15 +31,16 @@
     typedef typename conj_expr_if<ConjLhs,LhsMap>::type ConjLhsType;
     typedef Map<Matrix<ResScalar,Dynamic,1> > ResMap;
 
-    for (Index i=0; i<size; ++i)
-    {
-      Index s = IsLower&&(HasUnitDiag||HasZeroDiag) ? 1 : 0;
-      Index r = IsLower ? size-i: i+1;
-      if (EIGEN_IMPLIES(HasUnitDiag||HasZeroDiag, (--r)>0))
-	ResMap(res+(IsLower ? s+i : 0),r) += alpha * cj(rhs[i]) * ConjLhsType(LhsMap(lhs+s,r));
-      if (HasUnitDiag)
-	res[i] += alpha * cj(rhs[i]);
-      lhs += IsLower ? size-i: i+1;
+    for (Index i = 0; i < size; ++i) {
+      Index s = IsLower && (HasUnitDiag || HasZeroDiag) ? 1 : 0;
+      Index r = IsLower ? size - i : i + 1;
+      if (!(HasUnitDiag || HasZeroDiag) || (--r > 0)) {
+        ResMap(res + (IsLower ? s + i : 0), r) += alpha * cj(rhs[i]) * ConjLhsType(LhsMap(lhs + s, r));
+      }
+      if (HasUnitDiag) {
+        res[i] += alpha * cj(rhs[i]);
+      }
+      lhs += IsLower ? size - i : i + 1;
     }
   };
 };
@@ -61,15 +62,16 @@
     typedef Map<const Matrix<RhsScalar,Dynamic,1> > RhsMap;
     typedef typename conj_expr_if<ConjRhs,RhsMap>::type ConjRhsType;
 
-    for (Index i=0; i<size; ++i)
-    {
-      Index s = !IsLower&&(HasUnitDiag||HasZeroDiag) ? 1 : 0;
-      Index r = IsLower ? i+1 : size-i;
-      if (EIGEN_IMPLIES(HasUnitDiag||HasZeroDiag, (--r)>0))
-	res[i] += alpha * (ConjLhsType(LhsMap(lhs+s,r)).cwiseProduct(ConjRhsType(RhsMap(rhs+(IsLower ? 0 : s+i),r)))).sum();
-      if (HasUnitDiag)
-	res[i] += alpha * cj(rhs[i]);
-      lhs += IsLower ? i+1 : size-i;
+    for (Index i = 0; i < size; ++i) {
+      Index s = !IsLower && (HasUnitDiag || HasZeroDiag) ? 1 : 0;
+      Index r = IsLower ? i + 1 : size - i;
+      if (!(HasUnitDiag || HasZeroDiag) || (--r > 0)) {
+        res[i] += alpha * (ConjLhsType(LhsMap(lhs + s, r)).cwiseProduct(ConjRhsType(RhsMap(rhs + (IsLower ? 0 : s + i), r)))).sum();
+      }
+      if (HasUnitDiag) {
+        res[i] += alpha * cj(rhs[i]);
+      }
+      lhs += IsLower ? i + 1 : size - i;
     }
   };
 };
diff --git a/cmake/EigenTesting.cmake b/cmake/EigenTesting.cmake
index 3fce0f5..3581941 100644
--- a/cmake/EigenTesting.cmake
+++ b/cmake/EigenTesting.cmake
@@ -28,7 +28,9 @@
     set(is_gpu_test ON)
     if(EIGEN_TEST_HIP)
       hip_reset_flags()
-      hip_add_executable(${targetname} ${filename} HIPCC_OPTIONS "-DEIGEN_USE_HIP ${ARGV2}")
+      hip_add_executable(${targetname} ${filename} HIPCC_OPTIONS -std=c++14)
+      target_compile_definitions(${targetname} PRIVATE -DEIGEN_USE_HIP)
+      set_property(TARGET ${targetname} PROPERTY HIP_ARCHITECTURES gfx900 gfx906 gfx908 gfx90a gfx1030)
     elseif(EIGEN_TEST_CUDA_CLANG)
      set_source_files_properties(${filename} PROPERTIES LANGUAGE CXX)
       
@@ -38,22 +40,14 @@
         link_directories("${CUDA_TOOLKIT_ROOT_DIR}/lib")
       endif()
 
-      if (${ARGC} GREATER 2)
-        add_executable(${targetname} ${filename})
-      else()
-        add_executable(${targetname} ${filename} OPTIONS ${ARGV2})
-      endif()
+      add_executable(${targetname} ${filename})
       set(CUDA_CLANG_LINK_LIBRARIES "cudart_static" "cuda" "dl" "pthread")
       if (CMAKE_SYSTEM_NAME STREQUAL "Linux")
       set(CUDA_CLANG_LINK_LIBRARIES ${CUDA_CLANG_LINK_LIBRARIES} "rt")
       endif()
       target_link_libraries(${targetname} ${CUDA_CLANG_LINK_LIBRARIES})
     else()
-      if (${ARGC} GREATER 2)
-        cuda_add_executable(${targetname} ${filename} OPTIONS ${ARGV2})
-      else()
-        cuda_add_executable(${targetname} ${filename})
-      endif()
+      cuda_add_executable(${targetname} ${filename})
     endif()
   else()
    add_executable(${targetname} ${filename})
@@ -66,26 +60,26 @@
   endif()
 
   if(EIGEN_NO_ASSERTION_CHECKING)
-    ei_add_target_property(${targetname} COMPILE_FLAGS "-DEIGEN_NO_ASSERTION_CHECKING=1")
+    target_compile_definitions(${targetname} PRIVATE EIGEN_NO_ASSERTION_CHECKING=1)
   else()
     if(EIGEN_DEBUG_ASSERTS)
-      ei_add_target_property(${targetname} COMPILE_FLAGS "-DEIGEN_DEBUG_ASSERTS=1")
+      target_compile_definitions(${targetname} PRIVATE EIGEN_DEBUG_ASSERTS=1)
     endif()
   endif()
 
-  ei_add_target_property(${targetname} COMPILE_FLAGS "-DEIGEN_TEST_MAX_SIZE=${EIGEN_TEST_MAX_SIZE}")
+  target_compile_definitions(${targetname} PRIVATE EIGEN_TEST_MAX_SIZE=${EIGEN_TEST_MAX_SIZE})
 
   if(MSVC)
-    ei_add_target_property(${targetname} COMPILE_FLAGS "/bigobj")
+    target_compile_options(${targetname} PRIVATE "/bigobj")
   endif()
 
   # let the user pass flags.
   if(${ARGC} GREATER 2)
-    ei_add_target_property(${targetname} COMPILE_FLAGS "${ARGV2}")
+    target_compile_options(${targetname} PRIVATE ${ARGV2})
   endif()
 
   if(EIGEN_TEST_CUSTOM_CXX_FLAGS)
-    ei_add_target_property(${targetname} COMPILE_FLAGS "${EIGEN_TEST_CUSTOM_CXX_FLAGS}")
+    target_compile_options(${targetname} PRIVATE ${EIGEN_TEST_CUSTOM_CXX_FLAGS})
   endif()
 
   if(EIGEN_STANDARD_LIBRARIES_TO_LINK_TO)
@@ -216,12 +210,13 @@
   if( (EIGEN_SPLIT_LARGE_TESTS AND suffixes) OR explicit_suffixes)
     add_custom_target(${testname})
     foreach(suffix ${suffixes})
-      ei_add_test_internal(${testname} ${testname}_${suffix}
-        "${ARGV1} -DEIGEN_TEST_PART_${suffix}=1" "${ARGV2}")
+      ei_add_test_internal(${testname} ${testname}_${suffix} "${ARGV1}" "${ARGV2}")
       add_dependencies(${testname} ${testname}_${suffix})
+      target_compile_definitions(${testname}_${suffix} PRIVATE -DEIGEN_TEST_PART_${suffix}=1)
     endforeach()
   else()
-    ei_add_test_internal(${testname} ${testname} "${ARGV1} -DEIGEN_TEST_PART_ALL=1" "${ARGV2}")
+    ei_add_test_internal(${testname} ${testname} "${ARGV1}" "${ARGV2}")
+    target_compile_definitions(${testname} PRIVATE -DEIGEN_TEST_PART_ALL=1)
   endif()
 endmacro()
 
diff --git a/cmake/FindBLAS.cmake b/cmake/FindBLAS.cmake
deleted file mode 100644
index 1bb8f19..0000000
--- a/cmake/FindBLAS.cmake
+++ /dev/null
@@ -1,1407 +0,0 @@
-###
-#
-# @copyright (c) 2009-2014 The University of Tennessee and The University
-#                          of Tennessee Research Foundation.
-#                          All rights reserved.
-# @copyright (c) 2012-2016 Inria. All rights reserved.
-# @copyright (c) 2012-2014 Bordeaux INP, CNRS (LaBRI UMR 5800), Inria, Univ. Bordeaux. All rights reserved.
-#
-###
-#
-# - Find BLAS library
-# This module finds an installed fortran library that implements the BLAS
-# linear-algebra interface (see http://www.netlib.org/blas/).
-# The list of libraries searched for is taken
-# from the autoconf macro file, acx_blas.m4 (distributed at
-# http://ac-archive.sourceforge.net/ac-archive/acx_blas.html).
-#
-# This module sets the following variables:
-#  BLAS_FOUND - set to true if a library implementing the BLAS interface
-#    is found
-#  BLAS_LINKER_FLAGS - uncached list of required linker flags (excluding -l
-#    and -L).
-#  BLAS_COMPILER_FLAGS - uncached list of required compiler flags (including -I for mkl headers).
-#  BLAS_LIBRARIES - uncached list of libraries (using full path name) to
-#    link against to use BLAS
-#  BLAS95_LIBRARIES - uncached list of libraries (using full path name)
-#    to link against to use BLAS95 interface
-#  BLAS95_FOUND - set to true if a library implementing the BLAS f95 interface
-#    is found
-#  BLA_STATIC  if set on this determines what kind of linkage we do (static)
-#  BLA_VENDOR  if set checks only the specified vendor, if not set checks
-#     all the possibilities
-#  BLAS_VENDOR_FOUND stores the BLAS vendor found 
-#  BLA_F95     if set on tries to find the f95 interfaces for BLAS/LAPACK
-# The user can give specific paths where to find the libraries adding cmake
-# options at configure (ex: cmake path/to/project -DBLAS_DIR=path/to/blas):
-#  BLAS_DIR            - Where to find the base directory of blas
-#  BLAS_INCDIR         - Where to find the header files
-#  BLAS_LIBDIR         - Where to find the library files
-# The module can also look for the following environment variables if paths
-# are not given as cmake variable: BLAS_DIR, BLAS_INCDIR, BLAS_LIBDIR
-# For MKL case and if no paths are given as hints, we will try to use the MKLROOT
-# environment variable
-#  BLAS_VERBOSE Print some additional information during BLAS libraries detection
-##########
-### List of vendors (BLA_VENDOR) valid in this module
-########## List of vendors (BLA_VENDOR) valid in this module
-##  Open (for OpenBlas), Eigen (for EigenBlas), Goto, ATLAS PhiPACK,
-##  CXML, DXML, SunPerf, SCSL, SGIMATH, IBMESSL, IBMESSLMT
-##  Intel10_32 (intel mkl v10 32 bit), Intel10_64lp (intel mkl v10 64 bit,lp thread model, lp64 model),
-##  Intel10_64lp_seq (intel mkl v10 64 bit,sequential code, lp64 model),
-##  Intel( older versions of mkl 32 and 64 bit),
-##  ACML, ACML_MP, ACML_GPU, Apple, NAS, Generic
-# C/CXX should be enabled to use Intel mkl
-###
-# We handle different modes to find the dependency
-#
-# - Detection if already installed on the system
-#   - BLAS libraries can be detected from different ways
-#     Here is the order of precedence:
-#     1) we look in cmake variable BLAS_LIBDIR or BLAS_DIR (we guess the libdirs) if defined
-#     2) we look in environment variable BLAS_LIBDIR or BLAS_DIR (we guess the libdirs) if defined
-#     3) we look in common environnment variables depending on the system (INCLUDE, C_INCLUDE_PATH, CPATH - LIB, DYLD_LIBRARY_PATH, LD_LIBRARY_PATH)
-#     4) we look in common system paths depending on the system, see for example paths contained in the following cmake variables:
-#       - CMAKE_PLATFORM_IMPLICIT_INCLUDE_DIRECTORIES, CMAKE_PLATFORM_IMPLICIT_LINK_DIRECTORIES
-#       - CMAKE_C_IMPLICIT_INCLUDE_DIRECTORIES, CMAKE_C_IMPLICIT_LINK_DIRECTORIES
-#
-
-#=============================================================================
-# Copyright 2007-2009 Kitware, Inc.
-#
-# Distributed under the OSI-approved BSD License (the "License");
-# see accompanying file Copyright.txt for details.
-#
-# This software is distributed WITHOUT ANY WARRANTY; without even the
-# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-# See the License for more information.
-#=============================================================================
-# (To distribute this file outside of CMake, substitute the full
-#  License text for the above reference.)
-
-## Some macros to print status when search for headers and libs
-# This macro informs why the _lib_to_find file has not been found
-macro(Print_Find_Library_Blas_Status _libname _lib_to_find)
-
-  # save _libname upper/lower case
-  string(TOUPPER ${_libname} LIBNAME)
-  string(TOLOWER ${_libname} libname)
-
-  # print status
-  #message(" ")
-  if(${LIBNAME}_LIBDIR)
-    message("${Yellow}${LIBNAME}_LIBDIR is defined but ${_lib_to_find}"
-      "has not been found in ${ARGN}${ColourReset}")
-  else()
-    if(${LIBNAME}_DIR)
-      message("${Yellow}${LIBNAME}_DIR is defined but ${_lib_to_find}"
-	"has not been found in ${ARGN}${ColourReset}")
-    else()
-      message("${Yellow}${_lib_to_find} not found."
-	"Nor ${LIBNAME}_DIR neither ${LIBNAME}_LIBDIR"
-	"are defined so that we look for ${_lib_to_find} in"
-	"system paths (Linux: LD_LIBRARY_PATH, Windows: LIB,"
-	"Mac: DYLD_LIBRARY_PATH,"
-	"CMAKE_PLATFORM_IMPLICIT_LINK_DIRECTORIES,"
-	"CMAKE_C_IMPLICIT_LINK_DIRECTORIES)${ColourReset}")
-      if(_lib_env)
-	message("${Yellow}${_lib_to_find} has not been found in"
-	  "${_lib_env}${ColourReset}")
-      endif()
-    endif()
-  endif()
-  message("${BoldYellow}Please indicate where to find ${_lib_to_find}. You have three options:\n"
-    "- Option 1: Provide the Installation directory of BLAS library with cmake option: -D${LIBNAME}_DIR=your/path/to/${libname}/\n"
-    "- Option 2: Provide the directory where to find the library with cmake option: -D${LIBNAME}_LIBDIR=your/path/to/${libname}/lib/\n"
-    "- Option 3: Update your environment variable (Linux: LD_LIBRARY_PATH, Windows: LIB, Mac: DYLD_LIBRARY_PATH)\n"
-    "- Option 4: If your library provides a PkgConfig file, make sure pkg-config finds your library${ColourReset}")
-
-endmacro()
-
-# This macro informs why the _lib_to_find file has not been found
-macro(Print_Find_Library_Blas_CheckFunc_Status _name)
-
-  # save _libname upper/lower case
-  string(TOUPPER ${_name} FUNCNAME)
-  string(TOLOWER ${_name} funcname)
-
-  # print status
-  #message(" ")
-  message("${Red}Libs have been found but check of symbol ${_name} failed "
-    "with following libraries ${ARGN}${ColourReset}")
-  message("${BoldRed}Please open your error file CMakeFiles/CMakeError.log"
-    "to figure out why it fails${ColourReset}")
-  #message(" ")
-
-endmacro()
-
-if (NOT BLAS_FOUND)
-  set(BLAS_DIR "" CACHE PATH "Installation directory of BLAS library")
-  if (NOT BLAS_FIND_QUIETLY)
-    message(STATUS "A cache variable, namely BLAS_DIR, has been set to specify the install directory of BLAS")
-  endif()
-endif()
-
-option(BLAS_VERBOSE "Print some additional information during BLAS libraries detection" OFF)
-mark_as_advanced(BLAS_VERBOSE)
-
-include(CheckFunctionExists)
-include(CheckFortranFunctionExists)
-include(CMakeFindDependencyMacro)
-
-set(_blas_ORIG_CMAKE_FIND_LIBRARY_SUFFIXES ${CMAKE_FIND_LIBRARY_SUFFIXES})
-
-# Check the language being used
-get_property( _LANGUAGES_ GLOBAL PROPERTY ENABLED_LANGUAGES )
-if( _LANGUAGES_ MATCHES Fortran AND CMAKE_Fortran_COMPILER)
-  set( _CHECK_FORTRAN TRUE )
-elseif( (_LANGUAGES_ MATCHES C) OR (_LANGUAGES_ MATCHES CXX) )
-  set( _CHECK_FORTRAN FALSE )
-else()
-  if(BLAS_FIND_REQUIRED)
-    message(FATAL_ERROR "FindBLAS requires Fortran, C, or C++ to be enabled.")
-  else()
-    message(STATUS "Looking for BLAS... - NOT found (Unsupported languages)")
-    return()
-  endif()
-endif()
-
-macro(Check_Fortran_Libraries LIBRARIES _prefix _name _flags _list _thread)
-  # This macro checks for the existence of the combination of fortran libraries
-  # given by _list.  If the combination is found, this macro checks (using the
-  # Check_Fortran_Function_Exists macro) whether can link against that library
-  # combination using the name of a routine given by _name using the linker
-  # flags given by _flags.  If the combination of libraries is found and passes
-  # the link test, LIBRARIES is set to the list of complete library paths that
-  # have been found.  Otherwise, LIBRARIES is set to FALSE.
-
-  # N.B. _prefix is the prefix applied to the names of all cached variables that
-  # are generated internally and marked advanced by this macro.
-
-  set(_libdir ${ARGN})
-
-  set(_libraries_work TRUE)
-  set(${LIBRARIES})
-  set(_combined_name)
-  set(ENV_MKLROOT "$ENV{MKLROOT}")
-  set(ENV_BLAS_DIR "$ENV{BLAS_DIR}")
-  set(ENV_BLAS_LIBDIR "$ENV{BLAS_LIBDIR}")
-  if (NOT _libdir)
-    if (BLAS_LIBDIR)
-      list(APPEND _libdir "${BLAS_LIBDIR}")
-    elseif (BLAS_DIR)
-      list(APPEND _libdir "${BLAS_DIR}")
-      list(APPEND _libdir "${BLAS_DIR}/lib")
-      if("${CMAKE_SIZEOF_VOID_P}" EQUAL "8")
-	list(APPEND _libdir "${BLAS_DIR}/lib64")
-	list(APPEND _libdir "${BLAS_DIR}/lib/intel64")
-      else()
-	list(APPEND _libdir "${BLAS_DIR}/lib32")
-	list(APPEND _libdir "${BLAS_DIR}/lib/ia32")
-      endif()
-    elseif(ENV_BLAS_LIBDIR)
-      list(APPEND _libdir "${ENV_BLAS_LIBDIR}")
-    elseif(ENV_BLAS_DIR)
-      list(APPEND _libdir "${ENV_BLAS_DIR}")
-      list(APPEND _libdir "${ENV_BLAS_DIR}/lib")
-      if("${CMAKE_SIZEOF_VOID_P}" EQUAL "8")
-	list(APPEND _libdir "${ENV_BLAS_DIR}/lib64")
-	list(APPEND _libdir "${ENV_BLAS_DIR}/lib/intel64")
-      else()
-	list(APPEND _libdir "${ENV_BLAS_DIR}/lib32")
-	list(APPEND _libdir "${ENV_BLAS_DIR}/lib/ia32")
-      endif()
-    else()
-      if (ENV_MKLROOT)
-	list(APPEND _libdir "${ENV_MKLROOT}/lib")
-	if("${CMAKE_SIZEOF_VOID_P}" EQUAL "8")
-	  list(APPEND _libdir "${ENV_MKLROOT}/lib64")
-	  list(APPEND _libdir "${ENV_MKLROOT}/lib/intel64")
-	else()
-	  list(APPEND _libdir "${ENV_MKLROOT}/lib32")
-	  list(APPEND _libdir "${ENV_MKLROOT}/lib/ia32")
-	endif()
-      endif()
-      if (WIN32)
-	string(REPLACE ":" ";" _libdir2 "$ENV{LIB}")
-      elseif (APPLE)
-	string(REPLACE ":" ";" _libdir2 "$ENV{DYLD_LIBRARY_PATH}")
-      else ()
-	string(REPLACE ":" ";" _libdir2 "$ENV{LD_LIBRARY_PATH}")
-      endif ()
-      list(APPEND _libdir "${_libdir2}")
-      list(APPEND _libdir "${CMAKE_PLATFORM_IMPLICIT_LINK_DIRECTORIES}")
-      list(APPEND _libdir "${CMAKE_C_IMPLICIT_LINK_DIRECTORIES}")
-    endif()
-  endif ()
-
-  if (BLAS_VERBOSE)
-    message("${Cyan}Try to find BLAS libraries: ${_list}")
-  endif ()
-
-  foreach(_library ${_list})
-    set(_combined_name ${_combined_name}_${_library})
-
-    if(_libraries_work)
-      if (BLA_STATIC)
-	if (WIN32)
-	  set(CMAKE_FIND_LIBRARY_SUFFIXES .lib ${CMAKE_FIND_LIBRARY_SUFFIXES})
-	endif ()
-	if (APPLE)
-	  set(CMAKE_FIND_LIBRARY_SUFFIXES .lib ${CMAKE_FIND_LIBRARY_SUFFIXES})
-	else ()
-	  set(CMAKE_FIND_LIBRARY_SUFFIXES .a ${CMAKE_FIND_LIBRARY_SUFFIXES})
-	endif ()
-      else ()
-	if (CMAKE_SYSTEM_NAME STREQUAL "Linux")
-	  # for ubuntu's libblas3gf and liblapack3gf packages
-	  set(CMAKE_FIND_LIBRARY_SUFFIXES ${CMAKE_FIND_LIBRARY_SUFFIXES} .so.3gf)
-	endif ()
-      endif ()
-      find_library(${_prefix}_${_library}_LIBRARY
-	NAMES ${_library}
-	HINTS ${_libdir}
-	NO_DEFAULT_PATH
-	)
-      mark_as_advanced(${_prefix}_${_library}_LIBRARY)
-      # Print status if not found
-      # -------------------------
-      if (NOT ${_prefix}_${_library}_LIBRARY AND NOT BLAS_FIND_QUIETLY AND BLAS_VERBOSE)
-	Print_Find_Library_Blas_Status(blas ${_library} ${_libdir})
-      endif ()
-      set(${LIBRARIES} ${${LIBRARIES}} ${${_prefix}_${_library}_LIBRARY})
-      set(_libraries_work ${${_prefix}_${_library}_LIBRARY})
-    endif()
-  endforeach()
-
-  if(_libraries_work)
-    # Test this combination of libraries.
-    if (CMAKE_SYSTEM_NAME STREQUAL "Linux" AND BLA_STATIC)
-      list(INSERT ${LIBRARIES} 0 "-Wl,--start-group")
-      list(APPEND ${LIBRARIES} "-Wl,--end-group")
-    endif()
-    set(CMAKE_REQUIRED_LIBRARIES "${_flags};${${LIBRARIES}};${_thread}")
-    set(CMAKE_REQUIRED_FLAGS "${BLAS_COMPILER_FLAGS}")
-    if (BLAS_VERBOSE)
-      message("${Cyan}BLAS libs found for BLA_VENDOR ${BLA_VENDOR}."
-	"Try to compile symbol ${_name} with following libraries:"
-	"${CMAKE_REQUIRED_LIBRARIES}")
-    endif ()
-    if(NOT BLAS_FOUND)
-      unset(${_prefix}${_combined_name}_WORKS CACHE)
-    endif()
-    if (_CHECK_FORTRAN)
-      if (CMAKE_Fortran_COMPILER_ID STREQUAL "GNU")
-	string(REPLACE "mkl_intel_lp64" "mkl_gf_lp64" CMAKE_REQUIRED_LIBRARIES "${CMAKE_REQUIRED_LIBRARIES}")
-	string(REPLACE "mkl_intel_ilp64" "mkl_gf_ilp64" CMAKE_REQUIRED_LIBRARIES "${CMAKE_REQUIRED_LIBRARIES}")
-      endif()
-      check_fortran_function_exists("${_name}" ${_prefix}${_combined_name}_WORKS)
-    else()
-      check_function_exists("${_name}_" ${_prefix}${_combined_name}_WORKS)
-    endif()
-    mark_as_advanced(${_prefix}${_combined_name}_WORKS)
-    set(_libraries_work ${${_prefix}${_combined_name}_WORKS})
-    # Print status if not found
-    # -------------------------
-    if (NOT _libraries_work AND NOT BLAS_FIND_QUIETLY AND BLAS_VERBOSE)
-      Print_Find_Library_Blas_CheckFunc_Status(${_name} ${CMAKE_REQUIRED_LIBRARIES})
-    endif ()
-    set(CMAKE_REQUIRED_LIBRARIES)
-  endif()
-
-  if(_libraries_work)
-    set(${LIBRARIES} ${${LIBRARIES}} ${_thread})
-  else()
-    set(${LIBRARIES} FALSE)
-  endif()
-
-endmacro()
-
-
-set(BLAS_LINKER_FLAGS)
-set(BLAS_LIBRARIES)
-set(BLAS95_LIBRARIES)
-if ($ENV{BLA_VENDOR} MATCHES ".+")
-  set(BLA_VENDOR $ENV{BLA_VENDOR})
-else ()
-  if(NOT BLA_VENDOR)
-    set(BLA_VENDOR "All")
-  endif()
-endif ()
-
-#BLAS in intel mkl 10 library? (em64t 64bit)
-if (BLA_VENDOR MATCHES "Intel*" OR BLA_VENDOR STREQUAL "All")
-
-  if(NOT BLAS_LIBRARIES OR BLA_VENDOR MATCHES "Intel*")
-    # Looking for include
-    # -------------------
-
-    # Add system include paths to search include
-    # ------------------------------------------
-    unset(_inc_env)
-    set(ENV_MKLROOT "$ENV{MKLROOT}")
-    set(ENV_BLAS_DIR "$ENV{BLAS_DIR}")
-    set(ENV_BLAS_INCDIR "$ENV{BLAS_INCDIR}")
-    if(ENV_BLAS_INCDIR)
-      list(APPEND _inc_env "${ENV_BLAS_INCDIR}")
-    elseif(ENV_BLAS_DIR)
-      list(APPEND _inc_env "${ENV_BLAS_DIR}")
-      list(APPEND _inc_env "${ENV_BLAS_DIR}/include")
-    else()
-      if (ENV_MKLROOT)
-	list(APPEND _inc_env "${ENV_MKLROOT}/include")
-      endif()
-      # system variables
-      if(WIN32)
-	string(REPLACE ":" ";" _path_env "$ENV{INCLUDE}")
-	list(APPEND _inc_env "${_path_env}")
-      else()
-	string(REPLACE ":" ";" _path_env "$ENV{INCLUDE}")
-	list(APPEND _inc_env "${_path_env}")
-	string(REPLACE ":" ";" _path_env "$ENV{C_INCLUDE_PATH}")
-	list(APPEND _inc_env "${_path_env}")
-	string(REPLACE ":" ";" _path_env "$ENV{CPATH}")
-	list(APPEND _inc_env "${_path_env}")
-	string(REPLACE ":" ";" _path_env "$ENV{INCLUDE_PATH}")
-	list(APPEND _inc_env "${_path_env}")
-      endif()
-    endif()
-    list(APPEND _inc_env "${CMAKE_PLATFORM_IMPLICIT_INCLUDE_DIRECTORIES}")
-    list(APPEND _inc_env "${CMAKE_C_IMPLICIT_INCLUDE_DIRECTORIES}")
-    list(REMOVE_DUPLICATES _inc_env)
-
-    # set paths where to look for
-    set(PATH_TO_LOOK_FOR "${_inc_env}")
-
-    # Try to find the fftw header in the given paths
-    # -------------------------------------------------
-    # call cmake macro to find the header path
-    if(BLAS_INCDIR)
-      set(BLAS_mkl.h_DIRS "BLAS_mkl.h_DIRS-NOTFOUND")
-      find_path(BLAS_mkl.h_DIRS
-	NAMES mkl.h
-	HINTS ${BLAS_INCDIR})
-    else()
-      if(BLAS_DIR)
-	set(BLAS_mkl.h_DIRS "BLAS_mkl.h_DIRS-NOTFOUND")
-	find_path(BLAS_mkl.h_DIRS
-	  NAMES mkl.h
-	  HINTS ${BLAS_DIR}
-	  PATH_SUFFIXES "include")
-      else()
-	set(BLAS_mkl.h_DIRS "BLAS_mkl.h_DIRS-NOTFOUND")
-	find_path(BLAS_mkl.h_DIRS
-	  NAMES mkl.h
-	  HINTS ${PATH_TO_LOOK_FOR})
-      endif()
-    endif()
-    mark_as_advanced(BLAS_mkl.h_DIRS)
-
-    # If found, add path to cmake variable
-    # ------------------------------------
-    if (BLAS_mkl.h_DIRS)
-      set(BLAS_INCLUDE_DIRS "${BLAS_mkl.h_DIRS}")
-    else ()
-      set(BLAS_INCLUDE_DIRS "BLAS_INCLUDE_DIRS-NOTFOUND")
-      if(NOT BLAS_FIND_QUIETLY)
-	message(STATUS "Looking for BLAS -- mkl.h not found")
-      endif()
-    endif()
-
-    if (WIN32)
-      string(REPLACE ":" ";" _libdir "$ENV{LIB}")
-    elseif (APPLE)
-      string(REPLACE ":" ";" _libdir "$ENV{DYLD_LIBRARY_PATH}")
-    else ()
-      string(REPLACE ":" ";" _libdir "$ENV{LD_LIBRARY_PATH}")
-    endif ()
-    list(APPEND _libdir "${CMAKE_PLATFORM_IMPLICIT_LINK_DIRECTORIES}")
-    list(APPEND _libdir "${CMAKE_C_IMPLICIT_LINK_DIRECTORIES}")
-    # libiomp5
-    # --------
-    set(OMP_iomp5_LIBRARY "OMP_iomp5_LIBRARY-NOTFOUND")
-    find_library(OMP_iomp5_LIBRARY
-      NAMES iomp5
-      HINTS ${_libdir}
-      )
-    mark_as_advanced(OMP_iomp5_LIBRARY)
-    set(OMP_LIB "")
-    # libgomp
-    # -------
-    set(OMP_gomp_LIBRARY "OMP_gomp_LIBRARY-NOTFOUND")
-    find_library(OMP_gomp_LIBRARY
-      NAMES gomp
-      HINTS ${_libdir}
-      )
-    mark_as_advanced(OMP_gomp_LIBRARY)
-    # choose one or another depending on the compilo
-    if (CMAKE_C_COMPILER_ID STREQUAL "GNU")
-      if (OMP_gomp_LIBRARY)
-	set(OMP_LIB "${OMP_gomp_LIBRARY}")
-      endif()
-    else()
-      if (OMP_iomp5_LIBRARY)
-	set(OMP_LIB "${OMP_iomp5_LIBRARY}")
-      endif()
-    endif()
-
-    if (UNIX AND NOT WIN32)
-      # m
-      find_library(M_LIBRARY
-	NAMES m
-	HINTS ${_libdir})
-      mark_as_advanced(M_LIBRARY)
-      if(M_LIBRARY)
-	set(LM "-lm")
-      else()
-	set(LM "")
-      endif()
-      # Fortran
-      set(LGFORTRAN "")
-      if (CMAKE_C_COMPILER_ID MATCHES "GNU")
-	find_library(
-	  FORTRAN_gfortran_LIBRARY
-	  NAMES gfortran
-	  HINTS ${_libdir}
-	  )
-	mark_as_advanced(FORTRAN_gfortran_LIBRARY)
-	if (FORTRAN_gfortran_LIBRARY)
-	  set(LGFORTRAN "${FORTRAN_gfortran_LIBRARY}")
-	endif()
-      elseif (CMAKE_C_COMPILER_ID MATCHES "Intel")
-	find_library(
-	  FORTRAN_ifcore_LIBRARY
-	  NAMES ifcore
-	  HINTS ${_libdir}
-	  )
-	mark_as_advanced(FORTRAN_ifcore_LIBRARY)
-	if (FORTRAN_ifcore_LIBRARY)
-	  set(LGFORTRAN "{FORTRAN_ifcore_LIBRARY}")
-	endif()
-      endif()
-      set(BLAS_COMPILER_FLAGS "")
-      if (NOT BLA_VENDOR STREQUAL "Intel10_64lp_seq")
-	if (CMAKE_C_COMPILER_ID STREQUAL "Intel")
-	  list(APPEND BLAS_COMPILER_FLAGS "-openmp")
-	endif()
-	if (CMAKE_C_COMPILER_ID STREQUAL "GNU")
-	  list(APPEND BLAS_COMPILER_FLAGS "-fopenmp")
-	endif()
-      endif()
-      if (CMAKE_C_COMPILER_ID STREQUAL "GNU")
-	if (BLA_VENDOR STREQUAL "Intel10_32")
-	  list(APPEND BLAS_COMPILER_FLAGS "-m32")
-	else()
-	  list(APPEND BLAS_COMPILER_FLAGS "-m64")
-	endif()
-	if (NOT BLA_VENDOR STREQUAL "Intel10_64lp_seq")
-	  list(APPEND OMP_LIB "-ldl")
-	endif()
-	if (ENV_MKLROOT)
-	  list(APPEND BLAS_COMPILER_FLAGS "-I${ENV_MKLROOT}/include")
-	endif()
-      endif()
-
-      set(additional_flags "")
-      if (CMAKE_C_COMPILER_ID STREQUAL "GNU" AND CMAKE_SYSTEM_NAME STREQUAL "Linux")
-	set(additional_flags "-Wl,--no-as-needed")
-      endif()
-    endif ()
-
-    if (_LANGUAGES_ MATCHES C OR _LANGUAGES_ MATCHES CXX)
-      if(BLAS_FIND_QUIETLY OR NOT BLAS_FIND_REQUIRED)
-	find_dependency(Threads)
-      else()
-	find_dependency(Threads REQUIRED)
-      endif()
-
-      set(BLAS_SEARCH_LIBS "")
-
-      if(BLA_F95)
-
-	set(BLAS_mkl_SEARCH_SYMBOL SGEMM)
-	set(_LIBRARIES BLAS95_LIBRARIES)
-	if (WIN32)
-	  if (BLA_STATIC)
-	    set(BLAS_mkl_DLL_SUFFIX "")
-	  else()
-	    set(BLAS_mkl_DLL_SUFFIX "_dll")
-	  endif()
-
-	  # Find the main file (32-bit or 64-bit)
-	  set(BLAS_SEARCH_LIBS_WIN_MAIN "")
-	  if (BLA_VENDOR STREQUAL "Intel10_32" OR BLA_VENDOR STREQUAL "All")
-	    list(APPEND BLAS_SEARCH_LIBS_WIN_MAIN
-	      "mkl_blas95${BLAS_mkl_DLL_SUFFIX} mkl_intel_c${BLAS_mkl_DLL_SUFFIX}")
-	  endif()
-	  if (BLA_VENDOR STREQUAL "Intel10_64lp*" OR BLA_VENDOR STREQUAL "All")
-	    list(APPEND BLAS_SEARCH_LIBS_WIN_MAIN
-	      "mkl_blas95_lp64${BLAS_mkl_DLL_SUFFIX} mkl_intel_lp64${BLAS_mkl_DLL_SUFFIX}")
-	  endif ()
-
-	  # Add threading/sequential libs
-	  set(BLAS_SEARCH_LIBS_WIN_THREAD "")
-	  if (BLA_VENDOR STREQUAL "*_seq" OR BLA_VENDOR STREQUAL "All")
-	    list(APPEND BLAS_SEARCH_LIBS_WIN_THREAD
-	      "mkl_sequential${BLAS_mkl_DLL_SUFFIX}")
-	  endif()
-	  if (NOT BLA_VENDOR STREQUAL "*_seq" OR BLA_VENDOR STREQUAL "All")
-	    # old version
-	    list(APPEND BLAS_SEARCH_LIBS_WIN_THREAD
-	      "libguide40 mkl_intel_thread${BLAS_mkl_DLL_SUFFIX}")
-	    # mkl >= 10.3
-	    list(APPEND BLAS_SEARCH_LIBS_WIN_THREAD
-	      "libiomp5md mkl_intel_thread${BLAS_mkl_DLL_SUFFIX}")
-	  endif()
-
-	  # Cartesian product of the above
-	  foreach (MAIN ${BLAS_SEARCH_LIBS_WIN_MAIN})
-	    foreach (THREAD ${BLAS_SEARCH_LIBS_WIN_THREAD})
-	      list(APPEND BLAS_SEARCH_LIBS
-		"${MAIN} ${THREAD} mkl_core${BLAS_mkl_DLL_SUFFIX}")
-	    endforeach()
-	  endforeach()
-	else ()
-	  if (BLA_VENDOR STREQUAL "Intel10_32" OR BLA_VENDOR STREQUAL "All")
-	    list(APPEND BLAS_SEARCH_LIBS
-	      "mkl_blas95 mkl_intel mkl_intel_thread mkl_core guide")
-	  endif ()
-	  if (BLA_VENDOR STREQUAL "Intel10_64lp" OR BLA_VENDOR STREQUAL "All")
-	    # old version
-	    list(APPEND BLAS_SEARCH_LIBS
-	      "mkl_blas95 mkl_intel_lp64 mkl_intel_thread mkl_core guide")
-	    # mkl >= 10.3
-	    if (CMAKE_C_COMPILER_ID STREQUAL "Intel")
-	      list(APPEND BLAS_SEARCH_LIBS
-		"mkl_blas95_lp64 mkl_intel_lp64 mkl_intel_thread mkl_core")
-	    endif()
-	    if (CMAKE_C_COMPILER_ID STREQUAL "GNU")
-	      list(APPEND BLAS_SEARCH_LIBS
-		"mkl_blas95_lp64 mkl_intel_lp64 mkl_gnu_thread mkl_core")
-	    endif()
-	  endif ()
-	  if (BLA_VENDOR STREQUAL "Intel10_64lp_seq" OR BLA_VENDOR STREQUAL "All")
-	    list(APPEND BLAS_SEARCH_LIBS
-	      "mkl_intel_lp64 mkl_sequential mkl_core")
-	    if (BLA_VENDOR STREQUAL "Intel10_64lp_seq")
-	      set(OMP_LIB "")
-	    endif()
-	  endif ()
-	endif ()
-
-      else ()
-
-	set(BLAS_mkl_SEARCH_SYMBOL sgemm)
-	set(_LIBRARIES BLAS_LIBRARIES)
-	if (WIN32)
-	  if (BLA_STATIC)
-	    set(BLAS_mkl_DLL_SUFFIX "")
-	  else()
-	    set(BLAS_mkl_DLL_SUFFIX "_dll")
-	  endif()
-
-	  # Find the main file (32-bit or 64-bit)
-	  set(BLAS_SEARCH_LIBS_WIN_MAIN "")
-	  if (BLA_VENDOR STREQUAL "Intel10_32" OR BLA_VENDOR STREQUAL "All")
-	    list(APPEND BLAS_SEARCH_LIBS_WIN_MAIN
-	      "mkl_intel_c${BLAS_mkl_DLL_SUFFIX}")
-	  endif()
-	  if (BLA_VENDOR STREQUAL "Intel10_64lp*" OR BLA_VENDOR STREQUAL "All")
-	    list(APPEND BLAS_SEARCH_LIBS_WIN_MAIN
-	      "mkl_intel_lp64${BLAS_mkl_DLL_SUFFIX}")
-	  endif ()
-
-	  # Add threading/sequential libs
-	  set(BLAS_SEARCH_LIBS_WIN_THREAD "")
-	  if (NOT BLA_VENDOR STREQUAL "*_seq" OR BLA_VENDOR STREQUAL "All")
-	    # old version
-	    list(APPEND BLAS_SEARCH_LIBS_WIN_THREAD
-	      "libguide40 mkl_intel_thread${BLAS_mkl_DLL_SUFFIX}")
-	    # mkl >= 10.3
-	    list(APPEND BLAS_SEARCH_LIBS_WIN_THREAD
-	      "libiomp5md mkl_intel_thread${BLAS_mkl_DLL_SUFFIX}")
-	  endif()
-	  if (BLA_VENDOR STREQUAL "*_seq" OR BLA_VENDOR STREQUAL "All")
-	    list(APPEND BLAS_SEARCH_LIBS_WIN_THREAD
-	      "mkl_sequential${BLAS_mkl_DLL_SUFFIX}")
-	  endif()
-
-	  # Cartesian product of the above
-	  foreach (MAIN ${BLAS_SEARCH_LIBS_WIN_MAIN})
-	    foreach (THREAD ${BLAS_SEARCH_LIBS_WIN_THREAD})
-	      list(APPEND BLAS_SEARCH_LIBS
-		"${MAIN} ${THREAD} mkl_core${BLAS_mkl_DLL_SUFFIX}")
-	    endforeach()
-	  endforeach()
-	else ()
-	  if (BLA_VENDOR STREQUAL "Intel10_32" OR BLA_VENDOR STREQUAL "All")
-	    list(APPEND BLAS_SEARCH_LIBS
-	      "mkl_intel mkl_intel_thread mkl_core guide")
-	  endif ()
-	  if (BLA_VENDOR STREQUAL "Intel10_64lp" OR BLA_VENDOR STREQUAL "All")
-	    # old version
-	    list(APPEND BLAS_SEARCH_LIBS
-	      "mkl_intel_lp64 mkl_intel_thread mkl_core guide")
-	    # mkl >= 10.3
-	    if (CMAKE_C_COMPILER_ID STREQUAL "Intel")
-	      list(APPEND BLAS_SEARCH_LIBS
-		"mkl_intel_lp64 mkl_intel_thread mkl_core")
-	    endif()
-	    if (CMAKE_C_COMPILER_ID STREQUAL "GNU")
-	      list(APPEND BLAS_SEARCH_LIBS
-		"mkl_intel_lp64 mkl_gnu_thread mkl_core")
-	    endif()
-	  endif ()
-	  if (BLA_VENDOR STREQUAL "Intel10_64lp_seq" OR BLA_VENDOR STREQUAL "All")
-	    list(APPEND BLAS_SEARCH_LIBS
-	      "mkl_intel_lp64 mkl_sequential mkl_core")
-	    if (BLA_VENDOR STREQUAL "Intel10_64lp_seq")
-	      set(OMP_LIB "")
-	    endif()
-	  endif ()
-	  #older vesions of intel mkl libs
-	  if (BLA_VENDOR STREQUAL "Intel" OR BLA_VENDOR STREQUAL "All")
-	    list(APPEND BLAS_SEARCH_LIBS
-	      "mkl")
-	    list(APPEND BLAS_SEARCH_LIBS
-	      "mkl_ia32")
-	    list(APPEND BLAS_SEARCH_LIBS
-	      "mkl_em64t")
-	  endif ()
-	endif ()
-
-      endif ()
-
-      foreach (IT ${BLAS_SEARCH_LIBS})
-	string(REPLACE " " ";" SEARCH_LIBS ${IT})
-	if (${_LIBRARIES})
-	else ()
-	  check_fortran_libraries(
-	    ${_LIBRARIES}
-	    BLAS
-	    ${BLAS_mkl_SEARCH_SYMBOL}
-	    "${additional_flags}"
-	    "${SEARCH_LIBS}"
-	    "${OMP_LIB};${CMAKE_THREAD_LIBS_INIT};${LM}"
-	    )
-	  if(_LIBRARIES)
-	    set(BLAS_LINKER_FLAGS "${additional_flags}")
-	  endif()
-	endif()
-      endforeach ()
-      if(NOT BLAS_FIND_QUIETLY)
-        if(${_LIBRARIES})
-          message(STATUS "Looking for MKL BLAS: found")
-        else()
-          message(STATUS "Looking for MKL BLAS: not found")
-        endif()
-      endif()
-      if (${_LIBRARIES} AND NOT BLAS_VENDOR_FOUND)
-          set (BLAS_VENDOR_FOUND "Intel MKL")
-      endif()
-    endif ()
-  endif()
-endif ()
-
-
-if (BLA_VENDOR STREQUAL "Goto" OR BLA_VENDOR STREQUAL "All")
-
-  if(NOT BLAS_LIBRARIES)
-    # gotoblas (http://www.tacc.utexas.edu/tacc-projects/gotoblas2)
-    check_fortran_libraries(
-      BLAS_LIBRARIES
-      BLAS
-      sgemm
-      ""
-      "goto2"
-      ""
-      )
-    if(NOT BLAS_FIND_QUIETLY)
-      if(BLAS_LIBRARIES)
-	message(STATUS "Looking for Goto BLAS: found")
-      else()
-	message(STATUS "Looking for Goto BLAS: not found")
-      endif()
-    endif()
-  endif()
-  if (BLAS_LIBRARIES AND NOT BLAS_VENDOR_FOUND)
-      set (BLAS_VENDOR_FOUND "Goto")
-  endif()
-
-endif ()
-
-
-# OpenBlas
-if (BLA_VENDOR STREQUAL "Open" OR BLA_VENDOR STREQUAL "All")
-
-  if(NOT BLAS_LIBRARIES)
-    # openblas (http://www.openblas.net/)
-    check_fortran_libraries(
-      BLAS_LIBRARIES
-      BLAS
-      sgemm
-      ""
-      "openblas"
-      ""
-      )
-    if(NOT BLAS_FIND_QUIETLY)
-      if(BLAS_LIBRARIES)
-	message(STATUS "Looking for Open BLAS: found")
-      else()
-	message(STATUS "Looking for Open BLAS: not found")
-      endif()
-    endif()
-  endif()
-  if (BLAS_LIBRARIES AND NOT BLAS_VENDOR_FOUND)
-      set (BLAS_VENDOR_FOUND "Openblas")
-  endif()
-
-endif ()
-
-
-# EigenBlas
-if (BLA_VENDOR STREQUAL "Eigen" OR BLA_VENDOR STREQUAL "All")
-
-  if(NOT BLAS_LIBRARIES)
-    # eigenblas (http://eigen.tuxfamily.org/index.php?title=Main_Page)
-    check_fortran_libraries(
-      BLAS_LIBRARIES
-      BLAS
-      sgemm
-      ""
-      "eigen_blas"
-      ""
-      )
-    if(NOT BLAS_FIND_QUIETLY)
-      if(BLAS_LIBRARIES AND NOT BLAS_VENDOR_FOUND)
-	message(STATUS "Looking for Eigen BLAS: found")
-      else()
-	message(STATUS "Looking for Eigen BLAS: not found")
-      endif()
-    endif()
-  endif()
-
-  if(NOT BLAS_LIBRARIES)
-    # eigenblas (http://eigen.tuxfamily.org/index.php?title=Main_Page)
-    check_fortran_libraries(
-      BLAS_LIBRARIES
-      BLAS
-      sgemm
-      ""
-      "eigen_blas_static"
-      ""
-      )
-    if(NOT BLAS_FIND_QUIETLY)
-      if(BLAS_LIBRARIES)
-	message(STATUS "Looking for Eigen BLAS: found")
-      else()
-	message(STATUS "Looking for Eigen BLAS: not found")
-      endif()
-    endif()
-  endif()
-  if (BLAS_LIBRARIES AND NOT BLAS_VENDOR_FOUND)
-      set (BLAS_VENDOR_FOUND "Eigen")
-  endif()
-
-endif ()
-
-
-if (BLA_VENDOR STREQUAL "ATLAS" OR BLA_VENDOR STREQUAL "All")
-
-  if(NOT BLAS_LIBRARIES)
-    # BLAS in ATLAS library? (http://math-atlas.sourceforge.net/)
-    check_fortran_libraries(
-      BLAS_LIBRARIES
-      BLAS
-      dgemm
-      ""
-      "f77blas;atlas"
-      ""
-      )
-    if(NOT BLAS_FIND_QUIETLY)
-      if(BLAS_LIBRARIES)
-	message(STATUS "Looking for Atlas BLAS: found")
-      else()
-	message(STATUS "Looking for Atlas BLAS: not found")
-      endif()
-    endif()
-  endif()
-
-  if (BLAS_LIBRARIES AND NOT BLAS_VENDOR_FOUND)
-      set (BLAS_VENDOR_FOUND "Atlas")
-  endif()
-
-endif ()
-
-
-# BLAS in PhiPACK libraries? (requires generic BLAS lib, too)
-if (BLA_VENDOR STREQUAL "PhiPACK" OR BLA_VENDOR STREQUAL "All")
-
-  if(NOT BLAS_LIBRARIES)
-    check_fortran_libraries(
-      BLAS_LIBRARIES
-      BLAS
-      sgemm
-      ""
-      "sgemm;dgemm;blas"
-      ""
-      )
-    if(NOT BLAS_FIND_QUIETLY)
-      if(BLAS_LIBRARIES)
-	message(STATUS "Looking for PhiPACK BLAS: found")
-      else()
-	message(STATUS "Looking for PhiPACK BLAS: not found")
-      endif()
-    endif()
-  endif()
-
-  if (BLAS_LIBRARIES AND NOT BLAS_VENDOR_FOUND)
-      set (BLAS_VENDOR_FOUND "PhiPACK")
-  endif()
-
-endif ()
-
-
-# BLAS in Alpha CXML library?
-if (BLA_VENDOR STREQUAL "CXML" OR BLA_VENDOR STREQUAL "All")
-
-  if(NOT BLAS_LIBRARIES)
-    check_fortran_libraries(
-      BLAS_LIBRARIES
-      BLAS
-      sgemm
-      ""
-      "cxml"
-      ""
-      )
-    if(NOT BLAS_FIND_QUIETLY)
-      if(BLAS_LIBRARIES)
-	message(STATUS "Looking for CXML BLAS: found")
-      else()
-	message(STATUS "Looking for CXML BLAS: not found")
-      endif()
-    endif()
-  endif()
-
-  if (BLAS_LIBRARIES AND NOT BLAS_VENDOR_FOUND)
-      set (BLAS_VENDOR_FOUND "CXML")
-  endif()
-
-endif ()
-
-
-# BLAS in Alpha DXML library? (now called CXML, see above)
-if (BLA_VENDOR STREQUAL "DXML" OR BLA_VENDOR STREQUAL "All")
-
-  if(NOT BLAS_LIBRARIES)
-    check_fortran_libraries(
-      BLAS_LIBRARIES
-      BLAS
-      sgemm
-      ""
-      "dxml"
-      ""
-      )
-    if(NOT BLAS_FIND_QUIETLY)
-      if(BLAS_LIBRARIES)
-	message(STATUS "Looking for DXML BLAS: found")
-      else()
-	message(STATUS "Looking for DXML BLAS: not found")
-      endif()
-    endif()
-  endif()
-
-  if (BLAS_LIBRARIES AND NOT BLAS_VENDOR_FOUND)
-      set (BLAS_VENDOR_FOUND "DXML")
-  endif()
-  
-endif ()
-
-
-# BLAS in Sun Performance library?
-if (BLA_VENDOR STREQUAL "SunPerf" OR BLA_VENDOR STREQUAL "All")
-
-  if(NOT BLAS_LIBRARIES)
-    check_fortran_libraries(
-      BLAS_LIBRARIES
-      BLAS
-      sgemm
-      "-xlic_lib=sunperf"
-      "sunperf;sunmath"
-      ""
-      )
-    if(BLAS_LIBRARIES)
-      set(BLAS_LINKER_FLAGS "-xlic_lib=sunperf")
-    endif()
-    if(NOT BLAS_FIND_QUIETLY)
-      if(BLAS_LIBRARIES)
-	message(STATUS "Looking for SunPerf BLAS: found")
-      else()
-	message(STATUS "Looking for SunPerf BLAS: not found")
-      endif()
-    endif()
-  endif()
-
-  if (BLAS_LIBRARIES AND NOT BLAS_VENDOR_FOUND)
-      set (BLAS_VENDOR_FOUND "SunPerf")
-  endif()
-
-endif ()
-
-
-# BLAS in SCSL library?  (SGI/Cray Scientific Library)
-if (BLA_VENDOR STREQUAL "SCSL" OR BLA_VENDOR STREQUAL "All")
-
-  if(NOT BLAS_LIBRARIES)
-    check_fortran_libraries(
-      BLAS_LIBRARIES
-      BLAS
-      sgemm
-      ""
-      "scsl"
-      ""
-      )
-    if(NOT BLAS_FIND_QUIETLY)
-      if(BLAS_LIBRARIES)
-	message(STATUS "Looking for SCSL BLAS: found")
-      else()
-	message(STATUS "Looking for SCSL BLAS: not found")
-      endif()
-    endif()
-  endif()
-
-  if (BLAS_LIBRARIES AND NOT BLAS_VENDOR_FOUND)
-      set (BLAS_VENDOR_FOUND "SunPerf")
-  endif()
-
-endif ()
-
-
-# BLAS in SGIMATH library?
-if (BLA_VENDOR STREQUAL "SGIMATH" OR BLA_VENDOR STREQUAL "All")
-
-  if(NOT BLAS_LIBRARIES)
-    check_fortran_libraries(
-      BLAS_LIBRARIES
-      BLAS
-      sgemm
-      ""
-      "complib.sgimath"
-      ""
-      )
-    if(NOT BLAS_FIND_QUIETLY)
-      if(BLAS_LIBRARIES)
-	message(STATUS "Looking for SGIMATH BLAS: found")
-      else()
-	message(STATUS "Looking for SGIMATH BLAS: not found")
-      endif()
-    endif()
-  endif()
-
-  if (BLAS_LIBRARIES AND NOT BLAS_VENDOR_FOUND)
-      set (BLAS_VENDOR_FOUND "SGIMATH")
-  endif()
-
-endif ()
-
-
-# BLAS in IBM ESSL library (requires generic BLAS lib, too)
-if (BLA_VENDOR STREQUAL "IBMESSL" OR BLA_VENDOR STREQUAL "All")
-
-  if(NOT BLAS_LIBRARIES)
-    check_fortran_libraries(
-      BLAS_LIBRARIES
-      BLAS
-      sgemm
-      ""
-      "essl;xlfmath;xlf90_r;blas"
-      ""
-      )
-    if(NOT BLAS_FIND_QUIETLY)
-      if(BLAS_LIBRARIES)
-	message(STATUS "Looking for IBM ESSL BLAS: found")
-      else()
-	message(STATUS "Looking for IBM ESSL BLAS: not found")
-      endif()
-    endif()
-  endif()
-
-  if (BLAS_LIBRARIES AND NOT BLAS_VENDOR_FOUND)
-      set (BLAS_VENDOR_FOUND "IBM ESSL")
-  endif()
-
-endif ()
-
-# BLAS in IBM ESSL_MT library (requires generic BLAS lib, too)
-if (BLA_VENDOR STREQUAL "IBMESSLMT" OR BLA_VENDOR STREQUAL "All")
-
-  if(NOT BLAS_LIBRARIES)
-    check_fortran_libraries(
-      BLAS_LIBRARIES
-      BLAS
-      sgemm
-      ""
-      "esslsmp;xlsmp;xlfmath;xlf90_r;blas"
-      ""
-      )
-    if(NOT BLAS_FIND_QUIETLY)
-      if(BLAS_LIBRARIES)
-	message(STATUS "Looking for IBM ESSL MT BLAS: found")
-      else()
-	message(STATUS "Looking for IBM ESSL MT BLAS: not found")
-      endif()
-    endif()
-  endif()
-
-  if (BLAS_LIBRARIES AND NOT BLAS_VENDOR_FOUND)
-      set (BLAS_VENDOR_FOUND "IBM ESSL MT")
-  endif()
-
-endif ()
-
-
-#BLAS in acml library?
-if (BLA_VENDOR MATCHES "ACML.*" OR BLA_VENDOR STREQUAL "All")
-
-  if( ((BLA_VENDOR STREQUAL "ACML") AND (NOT BLAS_ACML_LIB_DIRS)) OR
-      ((BLA_VENDOR STREQUAL "ACML_MP") AND (NOT BLAS_ACML_MP_LIB_DIRS)) OR
-      ((BLA_VENDOR STREQUAL "ACML_GPU") AND (NOT BLAS_ACML_GPU_LIB_DIRS)))
-
-    # try to find acml in "standard" paths
-    if( WIN32 )
-      file( GLOB _ACML_ROOT "C:/AMD/acml*/ACML-EULA.txt" )
-    else()
-      file( GLOB _ACML_ROOT "/opt/acml*/ACML-EULA.txt" )
-    endif()
-    if( WIN32 )
-      file( GLOB _ACML_GPU_ROOT "C:/AMD/acml*/GPGPUexamples" )
-    else()
-      file( GLOB _ACML_GPU_ROOT "/opt/acml*/GPGPUexamples" )
-    endif()
-    list(GET _ACML_ROOT 0 _ACML_ROOT)
-    list(GET _ACML_GPU_ROOT 0 _ACML_GPU_ROOT)
-
-    if( _ACML_ROOT )
-
-      get_filename_component( _ACML_ROOT ${_ACML_ROOT} PATH )
-      if( SIZEOF_INTEGER EQUAL 8 )
-	set( _ACML_PATH_SUFFIX "_int64" )
-      else()
-	set( _ACML_PATH_SUFFIX "" )
-      endif()
-      if( CMAKE_Fortran_COMPILER_ID STREQUAL "Intel" )
-	set( _ACML_COMPILER32 "ifort32" )
-	set( _ACML_COMPILER64 "ifort64" )
-      elseif( CMAKE_Fortran_COMPILER_ID STREQUAL "SunPro" )
-	set( _ACML_COMPILER32 "sun32" )
-	set( _ACML_COMPILER64 "sun64" )
-      elseif( CMAKE_Fortran_COMPILER_ID STREQUAL "PGI" )
-	set( _ACML_COMPILER32 "pgi32" )
-	if( WIN32 )
-	  set( _ACML_COMPILER64 "win64" )
-	else()
-	  set( _ACML_COMPILER64 "pgi64" )
-	endif()
-      elseif( CMAKE_Fortran_COMPILER_ID STREQUAL "Open64" )
-	# 32 bit builds not supported on Open64 but for code simplicity
-	# We'll just use the same directory twice
-	set( _ACML_COMPILER32 "open64_64" )
-	set( _ACML_COMPILER64 "open64_64" )
-      elseif( CMAKE_Fortran_COMPILER_ID STREQUAL "NAG" )
-	set( _ACML_COMPILER32 "nag32" )
-	set( _ACML_COMPILER64 "nag64" )
-      else()
-	set( _ACML_COMPILER32 "gfortran32" )
-	set( _ACML_COMPILER64 "gfortran64" )
-      endif()
-
-      if( BLA_VENDOR STREQUAL "ACML_MP" )
-	set(_ACML_MP_LIB_DIRS
-	  "${_ACML_ROOT}/${_ACML_COMPILER32}_mp${_ACML_PATH_SUFFIX}/lib"
-	  "${_ACML_ROOT}/${_ACML_COMPILER64}_mp${_ACML_PATH_SUFFIX}/lib" )
-      else()
-	set(_ACML_LIB_DIRS
-	  "${_ACML_ROOT}/${_ACML_COMPILER32}${_ACML_PATH_SUFFIX}/lib"
-	  "${_ACML_ROOT}/${_ACML_COMPILER64}${_ACML_PATH_SUFFIX}/lib" )
-      endif()
-
-    endif()
-
-  elseif(BLAS_${BLA_VENDOR}_LIB_DIRS)
-
-    set(_${BLA_VENDOR}_LIB_DIRS ${BLAS_${BLA_VENDOR}_LIB_DIRS})
-
-  endif()
-
-  if( BLA_VENDOR STREQUAL "ACML_MP" )
-    foreach( BLAS_ACML_MP_LIB_DIRS ${_ACML_MP_LIB_DIRS})
-      check_fortran_libraries (
-	BLAS_LIBRARIES
-	BLAS
-	sgemm
-	"" "acml_mp;acml_mv" "" ${BLAS_ACML_MP_LIB_DIRS}
-	)
-      if( BLAS_LIBRARIES )
-	break()
-      endif()
-    endforeach()
-  elseif( BLA_VENDOR STREQUAL "ACML_GPU" )
-    foreach( BLAS_ACML_GPU_LIB_DIRS ${_ACML_GPU_LIB_DIRS})
-      check_fortran_libraries (
-	BLAS_LIBRARIES
-	BLAS
-	sgemm
-	"" "acml;acml_mv;CALBLAS" "" ${BLAS_ACML_GPU_LIB_DIRS}
-	)
-      if( BLAS_LIBRARIES )
-	break()
-      endif()
-    endforeach()
-  else()
-    foreach( BLAS_ACML_LIB_DIRS ${_ACML_LIB_DIRS} )
-      check_fortran_libraries (
-	BLAS_LIBRARIES
-	BLAS
-	sgemm
-	"" "acml;acml_mv" "" ${BLAS_ACML_LIB_DIRS}
-	)
-      if( BLAS_LIBRARIES )
-	break()
-      endif()
-    endforeach()
-  endif()
-
-  # Either acml or acml_mp should be in LD_LIBRARY_PATH but not both
-  if(NOT BLAS_LIBRARIES)
-    check_fortran_libraries(
-      BLAS_LIBRARIES
-      BLAS
-      sgemm
-      ""
-      "acml;acml_mv"
-      ""
-      )
-    if(NOT BLAS_FIND_QUIETLY)
-      if(BLAS_LIBRARIES)
-	message(STATUS "Looking for ACML BLAS: found")
-      else()
-	message(STATUS "Looking for ACML BLAS: not found")
-      endif()
-    endif()
-  endif()
-
-  if(NOT BLAS_LIBRARIES)
-    check_fortran_libraries(
-      BLAS_LIBRARIES
-      BLAS
-      sgemm
-      ""
-      "acml_mp;acml_mv"
-      ""
-      )
-    if(NOT BLAS_FIND_QUIETLY)
-      if(BLAS_LIBRARIES)
-	message(STATUS "Looking for ACML BLAS: found")
-      else()
-	message(STATUS "Looking for ACML BLAS: not found")
-      endif()
-    endif()
-  endif()
-
-  if(NOT BLAS_LIBRARIES)
-    check_fortran_libraries(
-      BLAS_LIBRARIES
-      BLAS
-      sgemm
-      ""
-      "acml;acml_mv;CALBLAS"
-      ""
-      )
-    if(NOT BLAS_FIND_QUIETLY)
-      if(BLAS_LIBRARIES)
-	message(STATUS "Looking for ACML BLAS: found")
-      else()
-	message(STATUS "Looking for ACML BLAS: not found")
-      endif()
-    endif()
-  endif()
-
-  if (BLAS_LIBRARIES AND NOT BLAS_VENDOR_FOUND)
-      set (BLAS_VENDOR_FOUND "ACML")
-  endif()
-
-endif () # ACML
-
-
-# Apple BLAS library?
-if (BLA_VENDOR STREQUAL "Apple" OR BLA_VENDOR STREQUAL "All")
-
-  if(NOT BLAS_LIBRARIES)
-    check_fortran_libraries(
-      BLAS_LIBRARIES
-      BLAS
-      dgemm
-      ""
-      "Accelerate"
-      ""
-      )
-    if(NOT BLAS_FIND_QUIETLY)
-      if(BLAS_LIBRARIES)
-	message(STATUS "Looking for Apple BLAS: found")
-      else()
-	message(STATUS "Looking for Apple BLAS: not found")
-      endif()
-    endif()
-  endif()
-
-  if (BLAS_LIBRARIES AND NOT BLAS_VENDOR_FOUND)
-      set (BLAS_VENDOR_FOUND "Apple Accelerate")
-  endif()
-
-endif ()
-
-
-if (BLA_VENDOR STREQUAL "NAS" OR BLA_VENDOR STREQUAL "All")
-
-  if ( NOT BLAS_LIBRARIES )
-    check_fortran_libraries(
-      BLAS_LIBRARIES
-      BLAS
-      dgemm
-      ""
-      "vecLib"
-      ""
-      )
-    if(NOT BLAS_FIND_QUIETLY)
-      if(BLAS_LIBRARIES)
-	message(STATUS "Looking for NAS BLAS: found")
-      else()
-	message(STATUS "Looking for NAS BLAS: not found")
-      endif()
-    endif()
-  endif ()
-
-  if (BLAS_LIBRARIES AND NOT BLAS_VENDOR_FOUND)
-      set (BLAS_VENDOR_FOUND "NAS")
-  endif()
-
-endif ()
-
-
-# Generic BLAS library?
-if (BLA_VENDOR STREQUAL "Generic" OR BLA_VENDOR STREQUAL "All")
-
-  set(BLAS_SEARCH_LIBS "blas;blas_LINUX;blas_MAC;blas_WINDOWS;refblas")
-  foreach (SEARCH_LIB ${BLAS_SEARCH_LIBS})
-    if (BLAS_LIBRARIES)
-    else ()
-      check_fortran_libraries(
-	BLAS_LIBRARIES
-	BLAS
-	sgemm
-	""
-	"${SEARCH_LIB}"
-	"${LGFORTRAN}"
-	)
-      if(NOT BLAS_FIND_QUIETLY)
-	if(BLAS_LIBRARIES)
-	  message(STATUS "Looking for Generic BLAS: found")
-	else()
-	  message(STATUS "Looking for Generic BLAS: not found")
-	endif()
-      endif()
-    endif()
-  endforeach ()
-
-  if (BLAS_LIBRARIES AND NOT BLAS_VENDOR_FOUND)
-      set (BLAS_VENDOR_FOUND "Netlib or other Generic libblas")
-  endif()
-
-endif ()
-
-
-if(BLA_F95)
-
-  if(BLAS95_LIBRARIES)
-    set(BLAS95_FOUND TRUE)
-  else()
-    set(BLAS95_FOUND FALSE)
-  endif()
-
-  if(NOT BLAS_FIND_QUIETLY)
-    if(BLAS95_FOUND)
-      message(STATUS "A library with BLAS95 API found.")
-      message(STATUS "BLAS_LIBRARIES ${BLAS_LIBRARIES}")
-    else()
-      message(WARNING "BLA_VENDOR has been set to ${BLA_VENDOR} but blas 95 libraries could not be found or check of symbols failed."
-	"\nPlease indicate where to find blas libraries. You have three options:\n"
-	"- Option 1: Provide the installation directory of BLAS library with cmake option: -DBLAS_DIR=your/path/to/blas\n"
-	"- Option 2: Provide the directory where to find BLAS libraries with cmake option: -DBLAS_LIBDIR=your/path/to/blas/libs\n"
-	"- Option 3: Update your environment variable (Linux: LD_LIBRARY_PATH, Windows: LIB, Mac: DYLD_LIBRARY_PATH)\n"
-	"\nTo follow libraries detection more precisely you can activate a verbose mode with -DBLAS_VERBOSE=ON at cmake configure."
-	"\nYou could also specify a BLAS vendor to look for by setting -DBLA_VENDOR=blas_vendor_name."
-	"\nList of possible BLAS vendor: Goto, ATLAS PhiPACK, CXML, DXML, SunPerf, SCSL, SGIMATH, IBMESSL, Intel10_32 (intel mkl v10 32 bit),"
-	"Intel10_64lp (intel mkl v10 64 bit, lp thread model, lp64 model), Intel10_64lp_seq (intel mkl v10 64 bit, sequential code, lp64 model),"
-	"Intel( older versions of mkl 32 and 64 bit), ACML, ACML_MP, ACML_GPU, Apple, NAS, Generic")
-      if(BLAS_FIND_REQUIRED)
-	message(FATAL_ERROR
-	  "A required library with BLAS95 API not found. Please specify library location.")
-      else()
-	message(STATUS
-	  "A library with BLAS95 API not found. Please specify library location.")
-      endif()
-    endif()
-  endif()
-
-  set(BLAS_FOUND TRUE)
-  set(BLAS_LIBRARIES "${BLAS95_LIBRARIES}")
-
-else()
-
-  if(BLAS_LIBRARIES)
-    set(BLAS_FOUND TRUE)
-  else()
-    set(BLAS_FOUND FALSE)
-  endif()
-
-  if(NOT BLAS_FIND_QUIETLY)
-    if(BLAS_FOUND)
-      message(STATUS "A library with BLAS API found.")
-      message(STATUS "BLAS_LIBRARIES ${BLAS_LIBRARIES}")
-    else()
-      message(WARNING "BLA_VENDOR has been set to ${BLA_VENDOR} but blas libraries could not be found or check of symbols failed."
-	"\nPlease indicate where to find blas libraries. You have three options:\n"
-	"- Option 1: Provide the installation directory of BLAS library with cmake option: -DBLAS_DIR=your/path/to/blas\n"
-	"- Option 2: Provide the directory where to find BLAS libraries with cmake option: -DBLAS_LIBDIR=your/path/to/blas/libs\n"
-	"- Option 3: Update your environment variable (Linux: LD_LIBRARY_PATH, Windows: LIB, Mac: DYLD_LIBRARY_PATH)\n"
-	"\nTo follow libraries detection more precisely you can activate a verbose mode with -DBLAS_VERBOSE=ON at cmake configure."
-	"\nYou could also specify a BLAS vendor to look for by setting -DBLA_VENDOR=blas_vendor_name."
-	"\nList of possible BLAS vendor: Goto, ATLAS PhiPACK, CXML, DXML, SunPerf, SCSL, SGIMATH, IBMESSL, Intel10_32 (intel mkl v10 32 bit),"
-	"Intel10_64lp (intel mkl v10 64 bit, lp thread model, lp64 model), Intel10_64lp_seq (intel mkl v10 64 bit, sequential code, lp64 model),"
-	"Intel( older versions of mkl 32 and 64 bit), ACML, ACML_MP, ACML_GPU, Apple, NAS, Generic")
-      if(BLAS_FIND_REQUIRED)
-	message(FATAL_ERROR
-	  "A required library with BLAS API not found. Please specify library location.")
-      else()
-	message(STATUS
-	  "A library with BLAS API not found. Please specify library location.")
-      endif()
-    endif()
-  endif()
-
-endif()
-
-set(CMAKE_FIND_LIBRARY_SUFFIXES ${_blas_ORIG_CMAKE_FIND_LIBRARY_SUFFIXES})
-
-if (BLAS_FOUND)
-  list(GET BLAS_LIBRARIES 0 first_lib)
-  get_filename_component(first_lib_path "${first_lib}" PATH)
-  if (${first_lib_path} MATCHES "(/lib(32|64)?$)|(/lib/intel64$|/lib/ia32$)")
-    string(REGEX REPLACE "(/lib(32|64)?$)|(/lib/intel64$|/lib/ia32$)" "" not_cached_dir "${first_lib_path}")
-    set(BLAS_DIR_FOUND "${not_cached_dir}" CACHE PATH "Installation directory of BLAS library" FORCE)
-  else()
-    set(BLAS_DIR_FOUND "${first_lib_path}" CACHE PATH "Installation directory of BLAS library" FORCE)
-  endif()
-endif()
-mark_as_advanced(BLAS_DIR)
-mark_as_advanced(BLAS_DIR_FOUND)
diff --git a/cmake/FindGLEW.cmake b/cmake/FindGLEW.cmake
deleted file mode 100644
index 9d486d5..0000000
--- a/cmake/FindGLEW.cmake
+++ /dev/null
@@ -1,105 +0,0 @@
-# Copyright (c) 2009 Boudewijn Rempt <boud@valdyas.org>                                                                                          
-#                                                                                                                                                
-# Redistribution and use is allowed according to the terms of the BSD license.                                                                   
-# For details see the accompanying COPYING-CMAKE-SCRIPTS file. 
-# 
-# - try to find glew library and include files
-#  GLEW_INCLUDE_DIR, where to find GL/glew.h, etc.
-#  GLEW_LIBRARIES, the libraries to link against
-#  GLEW_FOUND, If false, do not try to use GLEW.
-# Also defined, but not for general use are:
-#  GLEW_GLEW_LIBRARY = the full path to the glew library.
-
-if (WIN32)
-
-  if(CYGWIN)
-
-    find_path( GLEW_INCLUDE_DIR GL/glew.h)
-
-    find_library( GLEW_GLEW_LIBRARY glew32
-      ${OPENGL_LIBRARY_DIR}
-      /usr/lib/w32api
-      /usr/X11R6/lib
-    )
-
-
-  else(CYGWIN)
-  
-    find_path( GLEW_INCLUDE_DIR GL/glew.h
-      $ENV{GLEW_ROOT_PATH}/include
-    )
-
-    find_library( GLEW_GLEW_LIBRARY
-      NAMES glew glew32
-      PATHS
-      $ENV{GLEW_ROOT_PATH}/lib
-      ${OPENGL_LIBRARY_DIR}
-    )
-
-  endif(CYGWIN)
-
-else (WIN32)
-
-  if (APPLE)
-# These values for Apple could probably do with improvement.
-    find_path( GLEW_INCLUDE_DIR glew.h
-      /System/Library/Frameworks/GLEW.framework/Versions/A/Headers
-      ${OPENGL_LIBRARY_DIR}
-    )
-    set(GLEW_GLEW_LIBRARY "-framework GLEW" CACHE STRING "GLEW library for OSX")
-    set(GLEW_cocoa_LIBRARY "-framework Cocoa" CACHE STRING "Cocoa framework for OSX")
-  else (APPLE)
-
-    find_path( GLEW_INCLUDE_DIR GL/glew.h
-      /usr/include/GL
-      /usr/openwin/share/include
-      /usr/openwin/include
-      /usr/X11R6/include
-      /usr/include/X11
-      /opt/graphics/OpenGL/include
-      /opt/graphics/OpenGL/contrib/libglew
-    )
-
-    find_library( GLEW_GLEW_LIBRARY GLEW
-      /usr/openwin/lib
-      /usr/X11R6/lib
-    )
-
-  endif (APPLE)
-
-endif (WIN32)
-
-set( GLEW_FOUND "NO" )
-if(GLEW_INCLUDE_DIR)
-  if(GLEW_GLEW_LIBRARY)
-    # Is -lXi and -lXmu required on all platforms that have it?
-    # If not, we need some way to figure out what platform we are on.
-    set( GLEW_LIBRARIES
-      ${GLEW_GLEW_LIBRARY}
-      ${GLEW_cocoa_LIBRARY}
-    )
-    set( GLEW_FOUND "YES" )
-
-#The following deprecated settings are for backwards compatibility with CMake1.4
-    set (GLEW_LIBRARY ${GLEW_LIBRARIES})
-    set (GLEW_INCLUDE_PATH ${GLEW_INCLUDE_DIR})
-
-  endif(GLEW_GLEW_LIBRARY)
-endif(GLEW_INCLUDE_DIR)
-
-if(GLEW_FOUND)
-  if(NOT GLEW_FIND_QUIETLY)
-    message(STATUS "Found Glew: ${GLEW_LIBRARIES}")
-  endif(NOT GLEW_FIND_QUIETLY)
-else(GLEW_FOUND)
-  if(GLEW_FIND_REQUIRED)
-    message(FATAL_ERROR "Could not find Glew")
-  endif(GLEW_FIND_REQUIRED)
-endif(GLEW_FOUND)
-
-mark_as_advanced(
-  GLEW_INCLUDE_DIR
-  GLEW_GLEW_LIBRARY
-  GLEW_Xmu_LIBRARY
-  GLEW_Xi_LIBRARY
-)
diff --git a/cmake/FindGSL.cmake b/cmake/FindGSL.cmake
deleted file mode 100644
index 8632232..0000000
--- a/cmake/FindGSL.cmake
+++ /dev/null
@@ -1,170 +0,0 @@
-# Try to find gnu scientific library GSL
-# See 
-# http://www.gnu.org/software/gsl/  and
-# http://gnuwin32.sourceforge.net/packages/gsl.htm
-#
-# Once run this will define: 
-# 
-# GSL_FOUND       = system has GSL lib
-#
-# GSL_LIBRARIES   = full path to the libraries
-#    on Unix/Linux with additional linker flags from "gsl-config --libs"
-# 
-# CMAKE_GSL_CXX_FLAGS  = Unix compiler flags for GSL, essentially "`gsl-config --cxxflags`"
-#
-# GSL_INCLUDE_DIR      = where to find headers 
-#
-# GSL_LINK_DIRECTORIES = link directories, useful for rpath on Unix
-# GSL_EXE_LINKER_FLAGS = rpath on Unix
-#
-# Felix Woelk 07/2004
-# Jan Woetzel
-#
-# www.mip.informatik.uni-kiel.de
-# --------------------------------
-
-if(WIN32)
-  # JW tested with gsl-1.8, Windows XP, MSVS 7.1
-  set(GSL_POSSIBLE_ROOT_DIRS
-    ${GSL_ROOT_DIR}
-    $ENV{GSL_ROOT_DIR}
-    ${GSL_DIR}
-    ${GSL_HOME}    
-    $ENV{GSL_DIR}
-    $ENV{GSL_HOME}
-    $ENV{EXTRA}
-    "C:/Program Files/GnuWin32"
-    )
-  find_path(GSL_INCLUDE_DIR
-    NAMES gsl/gsl_cdf.h gsl/gsl_randist.h
-    PATHS ${GSL_POSSIBLE_ROOT_DIRS}
-    PATH_SUFFIXES include
-    DOC "GSL header include dir"
-    )
-  
-  find_library(GSL_GSL_LIBRARY
-    NAMES libgsl.dll.a gsl libgsl
-    PATHS  ${GSL_POSSIBLE_ROOT_DIRS}
-    PATH_SUFFIXES lib
-    DOC "GSL library" )
-  
-  if(NOT GSL_GSL_LIBRARY)
-	find_file(GSL_GSL_LIBRARY
-		NAMES libgsl.dll.a
-		PATHS  ${GSL_POSSIBLE_ROOT_DIRS}
-		PATH_SUFFIXES lib
-		DOC "GSL library")
-  endif()
-  
-  find_library(GSL_GSLCBLAS_LIBRARY
-    NAMES libgslcblas.dll.a gslcblas libgslcblas
-    PATHS  ${GSL_POSSIBLE_ROOT_DIRS}
-    PATH_SUFFIXES lib
-    DOC "GSL cblas library dir" )
-  
-  if(NOT GSL_GSLCBLAS_LIBRARY)
-	find_file(GSL_GSLCBLAS_LIBRARY
-		NAMES libgslcblas.dll.a
-		PATHS  ${GSL_POSSIBLE_ROOT_DIRS}
-		PATH_SUFFIXES lib
-		DOC "GSL library")
-  endif()
-  
-  set(GSL_LIBRARIES ${GSL_GSL_LIBRARY})
-
-  #message("DBG\n"
-  #  "GSL_GSL_LIBRARY=${GSL_GSL_LIBRARY}\n"
-  #  "GSL_GSLCBLAS_LIBRARY=${GSL_GSLCBLAS_LIBRARY}\n"
-  #  "GSL_LIBRARIES=${GSL_LIBRARIES}")
-
-
-else(WIN32)
-  
-  if(UNIX) 
-    set(GSL_CONFIG_PREFER_PATH 
-      "$ENV{GSL_DIR}/bin"
-      "$ENV{GSL_DIR}"
-      "$ENV{GSL_HOME}/bin" 
-      "$ENV{GSL_HOME}" 
-      CACHE STRING "preferred path to GSL (gsl-config)")
-    find_program(GSL_CONFIG gsl-config
-      ${GSL_CONFIG_PREFER_PATH}
-      /usr/bin/
-      )
-    # message("DBG GSL_CONFIG ${GSL_CONFIG}")
-    
-    if (GSL_CONFIG) 
-      # set CXXFLAGS to be fed into CXX_FLAGS by the user:
-      set(GSL_CXX_FLAGS "`${GSL_CONFIG} --cflags`")
-      
-      # set INCLUDE_DIRS to prefix+include
-      exec_program(${GSL_CONFIG}
-        ARGS --prefix
-        OUTPUT_VARIABLE GSL_PREFIX)
-      set(GSL_INCLUDE_DIR ${GSL_PREFIX}/include CACHE STRING INTERNAL)
-
-      # set link libraries and link flags
-      #set(GSL_LIBRARIES "`${GSL_CONFIG} --libs`")
-      exec_program(${GSL_CONFIG}
-        ARGS --libs
-        OUTPUT_VARIABLE GSL_LIBRARIES )
-        
-      # extract link dirs for rpath  
-      exec_program(${GSL_CONFIG}
-        ARGS --libs
-        OUTPUT_VARIABLE GSL_CONFIG_LIBS )
-      
-      # extract version
-      exec_program(${GSL_CONFIG}
-        ARGS --version
-        OUTPUT_VARIABLE GSL_FULL_VERSION )
-      
-      # split version as major/minor
-      string(REGEX MATCH "(.)\\..*" GSL_VERSION_MAJOR_ "${GSL_FULL_VERSION}")
-      set(GSL_VERSION_MAJOR ${CMAKE_MATCH_1})
-      string(REGEX MATCH ".\\.(.*)" GSL_VERSION_MINOR_ "${GSL_FULL_VERSION}")
-      set(GSL_VERSION_MINOR ${CMAKE_MATCH_1})
-
-      # split off the link dirs (for rpath)
-      # use regular expression to match wildcard equivalent "-L*<endchar>"
-      # with <endchar> is a space or a semicolon
-      string(REGEX MATCHALL "[-][L]([^ ;])+" 
-        GSL_LINK_DIRECTORIES_WITH_PREFIX 
-        "${GSL_CONFIG_LIBS}" )
-      #      message("DBG  GSL_LINK_DIRECTORIES_WITH_PREFIX=${GSL_LINK_DIRECTORIES_WITH_PREFIX}")
-
-      # remove prefix -L because we need the pure directory for LINK_DIRECTORIES
-      
-      if (GSL_LINK_DIRECTORIES_WITH_PREFIX)
-        string(REGEX REPLACE "[-][L]" "" GSL_LINK_DIRECTORIES ${GSL_LINK_DIRECTORIES_WITH_PREFIX} )
-      endif (GSL_LINK_DIRECTORIES_WITH_PREFIX)
-      set(GSL_EXE_LINKER_FLAGS "-Wl,-rpath,${GSL_LINK_DIRECTORIES}" CACHE STRING INTERNAL)
-      #      message("DBG  GSL_LINK_DIRECTORIES=${GSL_LINK_DIRECTORIES}")
-      #      message("DBG  GSL_EXE_LINKER_FLAGS=${GSL_EXE_LINKER_FLAGS}")
-
-      #      add_definitions("-DHAVE_GSL")
-      #      set(GSL_DEFINITIONS "-DHAVE_GSL")
-      mark_as_advanced(
-        GSL_CXX_FLAGS
-        GSL_INCLUDE_DIR
-        GSL_LIBRARIES
-        GSL_LINK_DIRECTORIES
-        GSL_DEFINITIONS
-        )
-      message(STATUS "Using GSL from ${GSL_PREFIX}")
-      
-    else(GSL_CONFIG)
-      message("FindGSL.cmake: gsl-config not found. Please set it manually. GSL_CONFIG=${GSL_CONFIG}")
-    endif(GSL_CONFIG)
-
-  endif(UNIX)
-endif(WIN32)
-
-
-if(GSL_LIBRARIES)
-  if(GSL_INCLUDE_DIR OR GSL_CXX_FLAGS)
-
-    set(GSL_FOUND 1)
-    
-  endif(GSL_INCLUDE_DIR OR GSL_CXX_FLAGS)
-endif(GSL_LIBRARIES)
diff --git a/cmake/FindLAPACK.cmake b/cmake/FindLAPACK.cmake
deleted file mode 100644
index 3fd7388..0000000
--- a/cmake/FindLAPACK.cmake
+++ /dev/null
@@ -1,274 +0,0 @@
-# Find LAPACK library
-#
-# This module finds an installed library that implements the LAPACK
-# linear-algebra interface (see http://www.netlib.org/lapack/).
-# The approach follows mostly that taken for the autoconf macro file, acx_lapack.m4
-# (distributed at http://ac-archive.sourceforge.net/ac-archive/acx_lapack.html).
-#
-# This module sets the following variables:
-#  LAPACK_FOUND - set to true if a library implementing the LAPACK interface
-#    is found
-#  LAPACK_INCLUDE_DIR - Directories containing the LAPACK header files
-#  LAPACK_DEFINITIONS - Compilation options to use LAPACK
-#  LAPACK_LINKER_FLAGS - Linker flags to use LAPACK (excluding -l
-#    and -L).
-#  LAPACK_LIBRARIES_DIR - Directories containing the LAPACK libraries.
-#     May be null if LAPACK_LIBRARIES contains libraries name using full path.
-#  LAPACK_LIBRARIES - List of libraries to link against LAPACK interface.
-#     May be null if the compiler supports auto-link (e.g. VC++).
-#  LAPACK_USE_FILE - The name of the cmake module to include to compile
-#     applications or libraries using LAPACK.
-#
-# This module was modified by CGAL team:
-# - find libraries for a C++ compiler, instead of Fortran
-# - added LAPACK_INCLUDE_DIR, LAPACK_DEFINITIONS and LAPACK_LIBRARIES_DIR
-# - removed LAPACK95_LIBRARIES
-
-
-include(CheckFunctionExists)
-include(CMakeFindDependencyMacro)
-
-# This macro checks for the existence of the combination of fortran libraries
-# given by _list.  If the combination is found, this macro checks (using the
-# check_function_exists macro) whether can link against that library
-# combination using the name of a routine given by _name using the linker
-# flags given by _flags.  If the combination of libraries is found and passes
-# the link test, LIBRARIES is set to the list of complete library paths that
-# have been found and DEFINITIONS to the required definitions.
-# Otherwise, LIBRARIES is set to FALSE.
-# N.B. _prefix is the prefix applied to the names of all cached variables that
-# are generated internally and marked advanced by this macro.
-macro(check_lapack_libraries DEFINITIONS LIBRARIES _prefix _name _flags _list _blas _path)
-  #message("DEBUG: check_lapack_libraries(${_list} in ${_path} with ${_blas})")
-
-  # Check for the existence of the libraries given by _list
-  set(_libraries_found TRUE)
-  set(_libraries_work FALSE)
-  set(${DEFINITIONS} "")
-  set(${LIBRARIES} "")
-  set(_combined_name)
-  foreach(_library ${_list})
-    set(_combined_name ${_combined_name}_${_library})
-
-    if(_libraries_found)
-      # search first in ${_path}
-      find_library(${_prefix}_${_library}_LIBRARY
-                  NAMES ${_library}
-                  PATHS ${_path} NO_DEFAULT_PATH
-                  )
-      # if not found, search in environment variables and system
-      if ( WIN32 )
-        find_library(${_prefix}_${_library}_LIBRARY
-                    NAMES ${_library}
-                    PATHS ENV LIB
-                    )
-      elseif ( APPLE )
-        find_library(${_prefix}_${_library}_LIBRARY
-                    NAMES ${_library}
-                    PATHS /usr/local/lib /usr/lib /usr/local/lib64 /usr/lib64 ENV DYLD_LIBRARY_PATH
-                    )
-      else ()
-        find_library(${_prefix}_${_library}_LIBRARY
-                    NAMES ${_library}
-                    PATHS /usr/local/lib /usr/lib /usr/local/lib64 /usr/lib64 ENV LD_LIBRARY_PATH
-                    )
-      endif()
-      mark_as_advanced(${_prefix}_${_library}_LIBRARY)
-      set(${LIBRARIES} ${${LIBRARIES}} ${${_prefix}_${_library}_LIBRARY})
-      set(_libraries_found ${${_prefix}_${_library}_LIBRARY})
-    endif()
-  endforeach()
-  if(_libraries_found)
-    set(_libraries_found ${${LIBRARIES}})
-  endif()
-
-  # Test this combination of libraries with the Fortran/f2c interface.
-  # We test the Fortran interface first as it is well standardized.
-  if(_libraries_found AND NOT _libraries_work)
-    set(${DEFINITIONS}  "-D${_prefix}_USE_F2C")
-    set(${LIBRARIES}    ${_libraries_found})
-    # Some C++ linkers require the f2c library to link with Fortran libraries.
-    # I do not know which ones, thus I just add the f2c library if it is available.
-    find_dependency( F2C QUIET )
-    if ( F2C_FOUND )
-      set(${DEFINITIONS}  ${${DEFINITIONS}} ${F2C_DEFINITIONS})
-      set(${LIBRARIES}    ${${LIBRARIES}} ${F2C_LIBRARIES})
-    endif()
-    set(CMAKE_REQUIRED_DEFINITIONS  ${${DEFINITIONS}})
-    set(CMAKE_REQUIRED_LIBRARIES    ${_flags} ${${LIBRARIES}} ${_blas})
-    #message("DEBUG: CMAKE_REQUIRED_DEFINITIONS = ${CMAKE_REQUIRED_DEFINITIONS}")
-    #message("DEBUG: CMAKE_REQUIRED_LIBRARIES = ${CMAKE_REQUIRED_LIBRARIES}")
-    # Check if function exists with f2c calling convention (ie a trailing underscore)
-    check_function_exists(${_name}_ ${_prefix}_${_name}_${_combined_name}_f2c_WORKS)
-    set(CMAKE_REQUIRED_DEFINITIONS} "")
-    set(CMAKE_REQUIRED_LIBRARIES    "")
-    mark_as_advanced(${_prefix}_${_name}_${_combined_name}_f2c_WORKS)
-    set(_libraries_work ${${_prefix}_${_name}_${_combined_name}_f2c_WORKS})
-  endif()
-
-  # If not found, test this combination of libraries with a C interface.
-  # A few implementations (ie ACML) provide a C interface. Unfortunately, there is no standard.
-  if(_libraries_found AND NOT _libraries_work)
-    set(${DEFINITIONS} "")
-    set(${LIBRARIES}   ${_libraries_found})
-    set(CMAKE_REQUIRED_DEFINITIONS "")
-    set(CMAKE_REQUIRED_LIBRARIES   ${_flags} ${${LIBRARIES}} ${_blas})
-    #message("DEBUG: CMAKE_REQUIRED_LIBRARIES = ${CMAKE_REQUIRED_LIBRARIES}")
-    check_function_exists(${_name} ${_prefix}_${_name}${_combined_name}_WORKS)
-    set(CMAKE_REQUIRED_LIBRARIES "")
-    mark_as_advanced(${_prefix}_${_name}${_combined_name}_WORKS)
-    set(_libraries_work ${${_prefix}_${_name}${_combined_name}_WORKS})
-  endif()
-
-  # on failure
-  if(NOT _libraries_work)
-    set(${DEFINITIONS} "")
-    set(${LIBRARIES}   FALSE)
-  endif()
-  #message("DEBUG: ${DEFINITIONS} = ${${DEFINITIONS}}")
-  #message("DEBUG: ${LIBRARIES} = ${${LIBRARIES}}")
-endmacro()
-
-
-#
-# main
-#
-
-# LAPACK requires BLAS
-if(LAPACK_FIND_QUIETLY OR NOT LAPACK_FIND_REQUIRED)
-  find_dependency(BLAS)
-else()
-  find_dependency(BLAS REQUIRED)
-endif()
-
-if (NOT BLAS_FOUND)
-
-  message(STATUS "LAPACK requires BLAS.")
-  set(LAPACK_FOUND FALSE)
-
-# Is it already configured?
-elseif (LAPACK_LIBRARIES_DIR OR LAPACK_LIBRARIES)
-
-  set(LAPACK_FOUND TRUE)
-
-else()
-
-  # reset variables
-  set( LAPACK_INCLUDE_DIR "" )
-  set( LAPACK_DEFINITIONS "" )
-  set( LAPACK_LINKER_FLAGS "" ) # unused (yet)
-  set( LAPACK_LIBRARIES "" )
-  set( LAPACK_LIBRARIES_DIR "" )
-
-    #
-    # If Unix, search for LAPACK function in possible libraries
-    #
-
-    #intel mkl lapack?
-    if(NOT LAPACK_LIBRARIES)
-      check_lapack_libraries(
-      LAPACK_DEFINITIONS
-      LAPACK_LIBRARIES
-      LAPACK
-      cheev
-      ""
-      "mkl_lapack"
-      "${BLAS_LIBRARIES}"
-      "${CGAL_TAUCS_LIBRARIES_DIR} ENV LAPACK_LIB_DIR"
-      )
-    endif()
-
-    #acml lapack?
-    if(NOT LAPACK_LIBRARIES)
-      check_lapack_libraries(
-      LAPACK_DEFINITIONS
-      LAPACK_LIBRARIES
-      LAPACK
-      cheev
-      ""
-      "acml"
-      "${BLAS_LIBRARIES}"
-      "${CGAL_TAUCS_LIBRARIES_DIR} ENV LAPACK_LIB_DIR"
-      )
-    endif()
-
-    # Apple LAPACK library?
-    if(NOT LAPACK_LIBRARIES)
-      check_lapack_libraries(
-      LAPACK_DEFINITIONS
-      LAPACK_LIBRARIES
-      LAPACK
-      cheev
-      ""
-      "Accelerate"
-      "${BLAS_LIBRARIES}"
-      "${CGAL_TAUCS_LIBRARIES_DIR} ENV LAPACK_LIB_DIR"
-      )
-    endif()
-
-    if ( NOT LAPACK_LIBRARIES )
-      check_lapack_libraries(
-      LAPACK_DEFINITIONS
-      LAPACK_LIBRARIES
-      LAPACK
-      cheev
-      ""
-      "vecLib"
-      "${BLAS_LIBRARIES}"
-      "${CGAL_TAUCS_LIBRARIES_DIR} ENV LAPACK_LIB_DIR"
-      )
-    endif ()
-
-    # Generic LAPACK library?
-    # This configuration *must* be the last try as this library is notably slow.
-    if ( NOT LAPACK_LIBRARIES )
-      check_lapack_libraries(
-      LAPACK_DEFINITIONS
-      LAPACK_LIBRARIES
-      LAPACK
-      cheev
-      ""
-      "lapack"
-      "${BLAS_LIBRARIES}"
-      "${CGAL_TAUCS_LIBRARIES_DIR} ENV LAPACK_LIB_DIR"
-      )
-    endif()
-
-  if(LAPACK_LIBRARIES_DIR OR LAPACK_LIBRARIES)
-    set(LAPACK_FOUND TRUE)
-  else()
-    set(LAPACK_FOUND FALSE)
-  endif()
-
-  if(NOT LAPACK_FIND_QUIETLY)
-    if(LAPACK_FOUND)
-      message(STATUS "A library with LAPACK API found.")
-    else()
-      if(LAPACK_FIND_REQUIRED)
-        message(FATAL_ERROR "A required library with LAPACK API not found. Please specify library location.")
-      else()
-        message(STATUS "A library with LAPACK API not found. Please specify library location.")
-      endif()
-    endif()
-  endif()
-
-  # Add variables to cache
-  set( LAPACK_INCLUDE_DIR   "${LAPACK_INCLUDE_DIR}"
-                            CACHE PATH "Directories containing the LAPACK header files" FORCE )
-  set( LAPACK_DEFINITIONS   "${LAPACK_DEFINITIONS}"
-                            CACHE STRING "Compilation options to use LAPACK" FORCE )
-  set( LAPACK_LINKER_FLAGS  "${LAPACK_LINKER_FLAGS}"
-                            CACHE STRING "Linker flags to use LAPACK" FORCE )
-  set( LAPACK_LIBRARIES     "${LAPACK_LIBRARIES}"
-                            CACHE FILEPATH "LAPACK libraries name" FORCE )
-  set( LAPACK_LIBRARIES_DIR "${LAPACK_LIBRARIES_DIR}"
-                            CACHE PATH "Directories containing the LAPACK libraries" FORCE )
-
-  #message("DEBUG: LAPACK_INCLUDE_DIR = ${LAPACK_INCLUDE_DIR}")
-  #message("DEBUG: LAPACK_DEFINITIONS = ${LAPACK_DEFINITIONS}")
-  #message("DEBUG: LAPACK_LINKER_FLAGS = ${LAPACK_LINKER_FLAGS}")
-  #message("DEBUG: LAPACK_LIBRARIES = ${LAPACK_LIBRARIES}")
-  #message("DEBUG: LAPACK_LIBRARIES_DIR = ${LAPACK_LIBRARIES_DIR}")
-  #message("DEBUG: LAPACK_FOUND = ${LAPACK_FOUND}")
-
-endif()
diff --git a/doc/PreprocessorDirectives.dox b/doc/PreprocessorDirectives.dox
index 79581a5..5a98539 100644
--- a/doc/PreprocessorDirectives.dox
+++ b/doc/PreprocessorDirectives.dox
@@ -55,23 +55,17 @@
 the information provided by the compiler.
 
  - \b EIGEN_MAX_CPP_VER - disables usage of C++ features requiring a version greater than EIGEN_MAX_CPP_VER.
-   Possible values are: 11, 14, 17, etc. If not defined (the default), %Eigen enables all features supported
+   Possible values are: 14, 17, etc. If not defined (the default), %Eigen enables all features supported
    by the compiler.
 
 Individual features can be explicitly enabled or disabled by defining the following token to 0 or 1 respectively.
-For instance, one might limit the C++ version to C++03 by defining EIGEN_MAX_CPP_VER=03, but still enable C99 math
+For instance, one might limit the C++ version to C++14 by defining EIGEN_MAX_CPP_VER=14, but still enable C99 math
 functions by defining EIGEN_HAS_C99_MATH=1.
 
  - \b EIGEN_HAS_C99_MATH - controls the usage of C99 math functions such as erf, erfc, lgamma, etc.
-   Automatic detection disabled if EIGEN_MAX_CPP_VER<11.
  - \b EIGEN_HAS_CXX11_MATH - controls the implementation of some functions such as round, logp1, isinf, isnan, etc.
-   Automatic detection disabled if EIGEN_MAX_CPP_VER<11.
  - \b EIGEN_HAS_STD_RESULT_OF - defines whether std::result_of is supported
-   Automatic detection disabled if EIGEN_MAX_CPP_VER<11.
- - \b EIGEN_HAS_VARIADIC_TEMPLATES - defines whether variadic templates are supported
-   Automatic detection disabled if EIGEN_MAX_CPP_VER<11.
  - \b EIGEN_HAS_CONSTEXPR - defines whether relaxed const expression are supported
-   Automatic detection disabled if EIGEN_MAX_CPP_VER<14.
  - \b EIGEN_NO_IO - Disables any usage and support for `<iostreams>`.
 
 \section TopicPreprocessorDirectivesAssertions Assertions
diff --git a/doc/examples/Cwise_erf.cpp b/doc/examples/Cwise_erf.cpp
index e7cd2c1..9ddc57d 100644
--- a/doc/examples/Cwise_erf.cpp
+++ b/doc/examples/Cwise_erf.cpp
@@ -1,9 +1,8 @@
 #include <Eigen/Core>
 #include <unsupported/Eigen/SpecialFunctions>
 #include <iostream>
-using namespace Eigen;
 int main()
 {
-  Array4d v(-0.5,2,0,-7);
+  Eigen::Array4d v(-0.5,2,0,-7);
   std::cout << v.erf() << std::endl;
 }
diff --git a/doc/examples/Cwise_erfc.cpp b/doc/examples/Cwise_erfc.cpp
index d8bb04c..4b7902c 100644
--- a/doc/examples/Cwise_erfc.cpp
+++ b/doc/examples/Cwise_erfc.cpp
@@ -1,9 +1,8 @@
 #include <Eigen/Core>
 #include <unsupported/Eigen/SpecialFunctions>
 #include <iostream>
-using namespace Eigen;
 int main()
 {
-  Array4d v(-0.5,2,0,-7);
+  Eigen::Array4d v(-0.5,2,0,-7);
   std::cout << v.erfc() << std::endl;
 }
diff --git a/doc/examples/Cwise_lgamma.cpp b/doc/examples/Cwise_lgamma.cpp
index 6bfaccb..f3c9fe6 100644
--- a/doc/examples/Cwise_lgamma.cpp
+++ b/doc/examples/Cwise_lgamma.cpp
@@ -1,9 +1,8 @@
 #include <Eigen/Core>
 #include <unsupported/Eigen/SpecialFunctions>
 #include <iostream>
-using namespace Eigen;
 int main()
 {
-  Array4d v(0.5,10,0,-1);
+  Eigen::Array4d v(0.5,10,0,-1);
   std::cout << v.lgamma() << std::endl;
 }
diff --git a/doc/examples/DenseBase_middleCols_int.cpp b/doc/examples/DenseBase_middleCols_int.cpp
index 0ebd955..d05a552 100644
--- a/doc/examples/DenseBase_middleCols_int.cpp
+++ b/doc/examples/DenseBase_middleCols_int.cpp
@@ -1,15 +1,12 @@
 #include <Eigen/Core>
 #include <iostream>
 
-using namespace Eigen;
-using namespace std;
-
-int main(void)
+int main()
 {
     int const N = 5;
-    MatrixXi A(N,N);
+    Eigen::MatrixXi A(N,N);
     A.setRandom();
-    cout << "A =\n" << A << '\n' << endl;
-    cout << "A(1..3,:) =\n" << A.middleCols(1,3) << endl;
+    std::cout << "A =\n" << A << '\n' << std::endl;
+    std::cout << "A(1..3,:) =\n" << A.middleCols(1,3) << std::endl;
     return 0;
 }
diff --git a/doc/examples/DenseBase_middleRows_int.cpp b/doc/examples/DenseBase_middleRows_int.cpp
index a6fe9e8..8651629 100644
--- a/doc/examples/DenseBase_middleRows_int.cpp
+++ b/doc/examples/DenseBase_middleRows_int.cpp
@@ -1,15 +1,12 @@
 #include <Eigen/Core>
 #include <iostream>
 
-using namespace Eigen;
-using namespace std;
-
-int main(void)
+int main()
 {
     int const N = 5;
-    MatrixXi A(N,N);
+    Eigen::MatrixXi A(N,N);
     A.setRandom();
-    cout << "A =\n" << A << '\n' << endl;
-    cout << "A(2..3,:) =\n" << A.middleRows(2,2) << endl;
+    std::cout << "A =\n" << A << '\n' << std::endl;
+    std::cout << "A(2..3,:) =\n" << A.middleRows(2,2) << std::endl;
     return 0;
 }
diff --git a/doc/examples/DenseBase_template_int_middleCols.cpp b/doc/examples/DenseBase_template_int_middleCols.cpp
index 6191d79..caefabf 100644
--- a/doc/examples/DenseBase_template_int_middleCols.cpp
+++ b/doc/examples/DenseBase_template_int_middleCols.cpp
@@ -1,15 +1,12 @@
 #include <Eigen/Core>
 #include <iostream>
 
-using namespace Eigen;
-using namespace std;
-
-int main(void)
+int main()
 {
     int const N = 5;
-    MatrixXi A(N,N);
+    Eigen::MatrixXi A(N,N);
     A.setRandom();
-    cout << "A =\n" << A << '\n' << endl;
-    cout << "A(:,1..3) =\n" << A.middleCols<3>(1) << endl;
+    std::cout << "A =\n" << A << '\n' << std::endl;
+    std::cout << "A(:,1..3) =\n" << A.middleCols<3>(1) << std::endl;
     return 0;
 }
diff --git a/doc/examples/DenseBase_template_int_middleRows.cpp b/doc/examples/DenseBase_template_int_middleRows.cpp
index 7e8b657..ed5b295 100644
--- a/doc/examples/DenseBase_template_int_middleRows.cpp
+++ b/doc/examples/DenseBase_template_int_middleRows.cpp
@@ -1,15 +1,12 @@
 #include <Eigen/Core>
 #include <iostream>
 
-using namespace Eigen;
-using namespace std;
-
-int main(void)
+int main()
 {
     int const N = 5;
-    MatrixXi A(N,N);
+    Eigen::MatrixXi A(N,N);
     A.setRandom();
-    cout << "A =\n" << A << '\n' << endl;
-    cout << "A(1..3,:) =\n" << A.middleRows<3>(1) << endl;
+    std::cout << "A =\n" << A << '\n' << std::endl;
+    std::cout << "A(1..3,:) =\n" << A.middleRows<3>(1) << std::endl;
     return 0;
 }
diff --git a/doc/examples/QuickStart_example2_dynamic.cpp b/doc/examples/QuickStart_example2_dynamic.cpp
index ff6746e..bc8d326 100644
--- a/doc/examples/QuickStart_example2_dynamic.cpp
+++ b/doc/examples/QuickStart_example2_dynamic.cpp
@@ -1,15 +1,15 @@
 #include <iostream>
 #include <Eigen/Dense>
 
-using namespace Eigen;
-using namespace std;
+using Eigen::MatrixXd;
+using Eigen::VectorXd;
 
 int main()
 {
   MatrixXd m = MatrixXd::Random(3,3);
   m = (m + MatrixXd::Constant(3,3,1.2)) * 50;
-  cout << "m =" << endl << m << endl;
+  std::cout << "m =" << std::endl << m << std::endl;
   VectorXd v(3);
   v << 1, 2, 3;
-  cout << "m * v =" << endl << m * v << endl;
+  std::cout << "m * v =" << std::endl << m * v << std::endl;
 }
diff --git a/doc/examples/QuickStart_example2_fixed.cpp b/doc/examples/QuickStart_example2_fixed.cpp
index d911752..af6f9a9 100644
--- a/doc/examples/QuickStart_example2_fixed.cpp
+++ b/doc/examples/QuickStart_example2_fixed.cpp
@@ -1,15 +1,15 @@
 #include <iostream>
 #include <Eigen/Dense>
 
-using namespace Eigen;
-using namespace std;
+using Eigen::Matrix3d;
+using Eigen::Vector3d;
 
 int main()
 {
   Matrix3d m = Matrix3d::Random();
   m = (m + Matrix3d::Constant(1.2)) * 50;
-  cout << "m =" << endl << m << endl;
+  std::cout << "m =" << std::endl << m << std::endl;
   Vector3d v(1,2,3);
   
-  cout << "m * v =" << endl << m * v << endl;
+  std::cout << "m * v =" << std::endl << m * v << std::endl;
 }
diff --git a/doc/examples/TemplateKeyword_flexible.cpp b/doc/examples/TemplateKeyword_flexible.cpp
index 9d85292..efe458b 100644
--- a/doc/examples/TemplateKeyword_flexible.cpp
+++ b/doc/examples/TemplateKeyword_flexible.cpp
@@ -1,19 +1,17 @@
 #include <Eigen/Dense>
 #include <iostream>
 
-using namespace Eigen;
-
 template <typename Derived1, typename Derived2>
-void copyUpperTriangularPart(MatrixBase<Derived1>& dst, const MatrixBase<Derived2>& src)
+void copyUpperTriangularPart(Eigen::MatrixBase<Derived1>& dst, const Eigen::MatrixBase<Derived2>& src)
 {
   /* Note the 'template' keywords in the following line! */
-  dst.template triangularView<Upper>() = src.template triangularView<Upper>();
+  dst.template triangularView<Eigen::Upper>() = src.template triangularView<Eigen::Upper>();
 }
 
 int main()
 {
-  MatrixXi m1 = MatrixXi::Ones(5,5);
-  MatrixXi m2 = MatrixXi::Random(4,4);
+  Eigen::MatrixXi m1 = Eigen::MatrixXi::Ones(5,5);
+  Eigen::MatrixXi m2 = Eigen::MatrixXi::Random(4,4);
   std::cout << "m2 before copy:" << std::endl;
   std::cout << m2 << std::endl << std::endl;
   copyUpperTriangularPart(m2, m1.topLeftCorner(4,4));
diff --git a/doc/examples/TemplateKeyword_simple.cpp b/doc/examples/TemplateKeyword_simple.cpp
index 6998c17..6b946ad 100644
--- a/doc/examples/TemplateKeyword_simple.cpp
+++ b/doc/examples/TemplateKeyword_simple.cpp
@@ -1,11 +1,11 @@
 #include <Eigen/Dense>
 #include <iostream>
 
-using namespace Eigen;
+using Eigen::MatrixXf;
 
 void copyUpperTriangularPart(MatrixXf& dst, const MatrixXf& src)
 {
-  dst.triangularView<Upper>() = src.triangularView<Upper>();
+  dst.triangularView<Eigen::Upper>() = src.triangularView<Eigen::Upper>();
 }
 
 int main()
diff --git a/doc/examples/TutorialInplaceLU.cpp b/doc/examples/TutorialInplaceLU.cpp
index cb9c59b..72bead2 100644
--- a/doc/examples/TutorialInplaceLU.cpp
+++ b/doc/examples/TutorialInplaceLU.cpp
@@ -1,61 +1,57 @@
 #include <iostream>
 struct init {
-  init() { std::cout << "[" << "init" << "]" << std::endl; }
+  init() { std::cout << "[init]\n"; }
 };
 init init_obj;
 // [init]
-#include <iostream>
 #include <Eigen/Dense>
 
-using namespace std;
-using namespace Eigen;
-
 int main()
 {
-  MatrixXd A(2,2);
+  Eigen::MatrixXd A(2,2);
   A << 2, -1, 1, 3;
-  cout << "Here is the input matrix A before decomposition:\n" << A << endl;
-cout << "[init]" << endl;
+  std::cout << "Here is the input matrix A before decomposition:\n" << A << "\n";
+  std::cout << "[init]\n";
 
-cout << "[declaration]" << endl;
-  PartialPivLU<Ref<MatrixXd> > lu(A);
-  cout << "Here is the input matrix A after decomposition:\n" << A << endl;
-cout << "[declaration]" << endl;
+  std::cout << "[declaration]\n";
+  Eigen::PartialPivLU<Eigen::Ref<Eigen::MatrixXd> > lu(A);
+  std::cout << "Here is the input matrix A after decomposition:\n" << A << "\n";
+  std::cout << "[declaration]\n";
 
-cout << "[matrixLU]" << endl;
-  cout << "Here is the matrix storing the L and U factors:\n" << lu.matrixLU() << endl;
-cout << "[matrixLU]" << endl;
+  std::cout << "[matrixLU]\n";
+  std::cout << "Here is the matrix storing the L and U factors:\n" << lu.matrixLU() << "\n";
+  std::cout << "[matrixLU]\n";
 
-cout << "[solve]" << endl;
-  MatrixXd A0(2,2); A0 << 2, -1, 1, 3;
-  VectorXd b(2);    b << 1, 2;
-  VectorXd x = lu.solve(b);
-  cout << "Residual: " << (A0 * x - b).norm() << endl;
-cout << "[solve]" << endl;
+  std::cout << "[solve]\n";
+  Eigen::MatrixXd A0(2,2); A0 << 2, -1, 1, 3;
+  Eigen::VectorXd b(2);    b << 1, 2;
+  Eigen::VectorXd x = lu.solve(b);
+  std::cout << "Residual: " << (A0 * x - b).norm() << "\n";
+  std::cout << "[solve]\n";
 
-cout << "[modifyA]" << endl;
+  std::cout << "[modifyA]\n";
   A << 3, 4, -2, 1;
   x = lu.solve(b);
-  cout << "Residual: " << (A0 * x - b).norm() << endl;
-cout << "[modifyA]" << endl;
+  std::cout << "Residual: " << (A0 * x - b).norm() << "\n";
+  std::cout << "[modifyA]\n";
 
-cout << "[recompute]" << endl;
+  std::cout << "[recompute]\n";
   A0 = A; // save A
   lu.compute(A);
   x = lu.solve(b);
-  cout << "Residual: " << (A0 * x - b).norm() << endl;
-cout << "[recompute]" << endl;
+  std::cout << "Residual: " << (A0 * x - b).norm() << "\n";
+  std::cout << "[recompute]\n";
 
-cout << "[recompute_bis0]" << endl;
-  MatrixXd A1(2,2);
+  std::cout << "[recompute_bis0]\n";
+  Eigen::MatrixXd A1(2,2);
   A1 << 5,-2,3,4;
   lu.compute(A1);
-  cout << "Here is the input matrix A1 after decomposition:\n" << A1 << endl;
-cout << "[recompute_bis0]" << endl;
+  std::cout << "Here is the input matrix A1 after decomposition:\n" << A1 << "\n";
+  std::cout << "[recompute_bis0]\n";
 
-cout << "[recompute_bis1]" << endl;
+  std::cout << "[recompute_bis1]\n";
   x = lu.solve(b);
-  cout << "Residual: " << (A1 * x - b).norm() << endl;
-cout << "[recompute_bis1]" << endl;
+  std::cout << "Residual: " << (A1 * x - b).norm() << "\n";
+  std::cout << "[recompute_bis1]\n";
 
 }
diff --git a/doc/examples/TutorialLinAlgComputeTwice.cpp b/doc/examples/TutorialLinAlgComputeTwice.cpp
index 06ba646..a561f08 100644
--- a/doc/examples/TutorialLinAlgComputeTwice.cpp
+++ b/doc/examples/TutorialLinAlgComputeTwice.cpp
@@ -1,23 +1,20 @@
 #include <iostream>
 #include <Eigen/Dense>
 
-using namespace std;
-using namespace Eigen;
-
 int main()
 {
-   Matrix2f A, b;
-   LLT<Matrix2f> llt;
+   Eigen::Matrix2f A, b;
+   Eigen::LLT<Eigen::Matrix2f> llt;
    A << 2, -1, -1, 3;
    b << 1, 2, 3, 1;
-   cout << "Here is the matrix A:\n" << A << endl;
-   cout << "Here is the right hand side b:\n" << b << endl;
-   cout << "Computing LLT decomposition..." << endl;
+   std::cout << "Here is the matrix A:\n" << A << std::endl;
+   std::cout << "Here is the right hand side b:\n" << b << std::endl;
+   std::cout << "Computing LLT decomposition..." << std::endl;
    llt.compute(A);
-   cout << "The solution is:\n" << llt.solve(b) << endl;
+   std::cout << "The solution is:\n" << llt.solve(b) << std::endl;
    A(1,1)++;
-   cout << "The matrix A is now:\n" << A << endl;
-   cout << "Computing LLT decomposition..." << endl;
+   std::cout << "The matrix A is now:\n" << A << std::endl;
+   std::cout << "Computing LLT decomposition..." << std::endl;
    llt.compute(A);
-   cout << "The solution is now:\n" << llt.solve(b) << endl;
+   std::cout << "The solution is now:\n" << llt.solve(b) << std::endl;
 }
diff --git a/doc/examples/TutorialLinAlgExComputeSolveError.cpp b/doc/examples/TutorialLinAlgExComputeSolveError.cpp
index f362fb7..199f3f5 100644
--- a/doc/examples/TutorialLinAlgExComputeSolveError.cpp
+++ b/doc/examples/TutorialLinAlgExComputeSolveError.cpp
@@ -1,8 +1,7 @@
 #include <iostream>
 #include <Eigen/Dense>
 
-using namespace std;
-using namespace Eigen;
+using Eigen::MatrixXd;
 
 int main()
 {
@@ -10,5 +9,5 @@
    MatrixXd b = MatrixXd::Random(100,50);
    MatrixXd x = A.fullPivLu().solve(b);
    double relative_error = (A*x - b).norm() / b.norm(); // norm() is L2 norm
-   cout << "The relative error is:\n" << relative_error << endl;
+   std::cout << "The relative error is:\n" << relative_error << std::endl;
 }
diff --git a/doc/examples/TutorialLinAlgExSolveColPivHouseholderQR.cpp b/doc/examples/TutorialLinAlgExSolveColPivHouseholderQR.cpp
index 3a99a94..5ee6b6a 100644
--- a/doc/examples/TutorialLinAlgExSolveColPivHouseholderQR.cpp
+++ b/doc/examples/TutorialLinAlgExSolveColPivHouseholderQR.cpp
@@ -1,17 +1,14 @@
 #include <iostream>
 #include <Eigen/Dense>
 
-using namespace std;
-using namespace Eigen;
-
 int main()
 {
-   Matrix3f A;
-   Vector3f b;
+   Eigen::Matrix3f A;
+   Eigen::Vector3f b;
    A << 1,2,3,  4,5,6,  7,8,10;
    b << 3, 3, 4;
-   cout << "Here is the matrix A:\n" << A << endl;
-   cout << "Here is the vector b:\n" << b << endl;
-   Vector3f x = A.colPivHouseholderQr().solve(b);
-   cout << "The solution is:\n" << x << endl;
+   std::cout << "Here is the matrix A:\n" << A << std::endl;
+   std::cout << "Here is the vector b:\n" << b << std::endl;
+   Eigen::Vector3f x = A.colPivHouseholderQr().solve(b);
+   std::cout << "The solution is:\n" << x << std::endl;
 }
diff --git a/doc/examples/TutorialLinAlgExSolveLDLT.cpp b/doc/examples/TutorialLinAlgExSolveLDLT.cpp
index f8beacd..82186d4 100644
--- a/doc/examples/TutorialLinAlgExSolveLDLT.cpp
+++ b/doc/examples/TutorialLinAlgExSolveLDLT.cpp
@@ -1,16 +1,13 @@
 #include <iostream>
 #include <Eigen/Dense>
 
-using namespace std;
-using namespace Eigen;
-
 int main()
 {
-   Matrix2f A, b;
+   Eigen::Matrix2f A, b;
    A << 2, -1, -1, 3;
    b << 1, 2, 3, 1;
-   cout << "Here is the matrix A:\n" << A << endl;
-   cout << "Here is the right hand side b:\n" << b << endl;
-   Matrix2f x = A.ldlt().solve(b);
-   cout << "The solution is:\n" << x << endl;
+   std::cout << "Here is the matrix A:\n" << A << std::endl;
+   std::cout << "Here is the right hand side b:\n" << b << std::endl;
+   Eigen::Matrix2f x = A.ldlt().solve(b);
+   std::cout << "The solution is:\n" << x << std::endl;
 }
diff --git a/doc/examples/TutorialLinAlgInverseDeterminant.cpp b/doc/examples/TutorialLinAlgInverseDeterminant.cpp
index 14dde5b..b31a92a 100644
--- a/doc/examples/TutorialLinAlgInverseDeterminant.cpp
+++ b/doc/examples/TutorialLinAlgInverseDeterminant.cpp
@@ -1,16 +1,13 @@
 #include <iostream>
 #include <Eigen/Dense>
 
-using namespace std;
-using namespace Eigen;
-
 int main()
 {
-   Matrix3f A;
+   Eigen::Matrix3f A;
    A << 1, 2, 1,
         2, 1, 0,
         -1, 1, 2;
-   cout << "Here is the matrix A:\n" << A << endl;
-   cout << "The determinant of A is " << A.determinant() << endl;
-   cout << "The inverse of A is:\n" << A.inverse() << endl;
+   std::cout << "Here is the matrix A:\n" << A << std::endl;
+   std::cout << "The determinant of A is " << A.determinant() << std::endl;
+   std::cout << "The inverse of A is:\n" << A.inverse() << std::endl;
 }
diff --git a/doc/examples/TutorialLinAlgRankRevealing.cpp b/doc/examples/TutorialLinAlgRankRevealing.cpp
index c516507..fea52ab 100644
--- a/doc/examples/TutorialLinAlgRankRevealing.cpp
+++ b/doc/examples/TutorialLinAlgRankRevealing.cpp
@@ -1,20 +1,17 @@
 #include <iostream>
 #include <Eigen/Dense>
 
-using namespace std;
-using namespace Eigen;
-
 int main()
 {
-   Matrix3f A;
+   Eigen::Matrix3f A;
    A << 1, 2, 5,
         2, 1, 4,
         3, 0, 3;
-   cout << "Here is the matrix A:\n" << A << endl;
-   FullPivLU<Matrix3f> lu_decomp(A);
-   cout << "The rank of A is " << lu_decomp.rank() << endl;
-   cout << "Here is a matrix whose columns form a basis of the null-space of A:\n"
-        << lu_decomp.kernel() << endl;
-   cout << "Here is a matrix whose columns form a basis of the column-space of A:\n"
-        << lu_decomp.image(A) << endl; // yes, have to pass the original A
+   std::cout << "Here is the matrix A:\n" << A << std::endl;
+   Eigen::FullPivLU<Eigen::Matrix3f> lu_decomp(A);
+   std::cout << "The rank of A is " << lu_decomp.rank() << std::endl;
+   std::cout << "Here is a matrix whose columns form a basis of the null-space of A:\n"
+        << lu_decomp.kernel() << std::endl;
+   std::cout << "Here is a matrix whose columns form a basis of the column-space of A:\n"
+        << lu_decomp.image(A) << std::endl; // yes, have to pass the original A
 }
diff --git a/doc/examples/TutorialLinAlgSVDSolve.cpp b/doc/examples/TutorialLinAlgSVDSolve.cpp
index f109f04..23ad422 100644
--- a/doc/examples/TutorialLinAlgSVDSolve.cpp
+++ b/doc/examples/TutorialLinAlgSVDSolve.cpp
@@ -1,15 +1,12 @@
 #include <iostream>
 #include <Eigen/Dense>
 
-using namespace std;
-using namespace Eigen;
-
 int main()
 {
-   MatrixXf A = MatrixXf::Random(3, 2);
-   cout << "Here is the matrix A:\n" << A << endl;
-   VectorXf b = VectorXf::Random(3);
-   cout << "Here is the right hand side b:\n" << b << endl;
-   cout << "The least-squares solution is:\n"
-        << A.bdcSvd(ComputeThinU | ComputeThinV).solve(b) << endl;
+   Eigen::MatrixXf A = Eigen::MatrixXf::Random(3, 2);
+   std::cout << "Here is the matrix A:\n" << A << std::endl;
+   Eigen::VectorXf b = Eigen::VectorXf::Random(3);
+   std::cout << "Here is the right hand side b:\n" << b << std::endl;
+   std::cout << "The least-squares solution is:\n"
+        << A.bdcSvd(Eigen::ComputeThinU | Eigen::ComputeThinV).solve(b) << std::endl;
 }
diff --git a/doc/examples/TutorialLinAlgSelfAdjointEigenSolver.cpp b/doc/examples/TutorialLinAlgSelfAdjointEigenSolver.cpp
index 8d1d1ed..fcf2f33 100644
--- a/doc/examples/TutorialLinAlgSelfAdjointEigenSolver.cpp
+++ b/doc/examples/TutorialLinAlgSelfAdjointEigenSolver.cpp
@@ -1,18 +1,15 @@
 #include <iostream>
 #include <Eigen/Dense>
 
-using namespace std;
-using namespace Eigen;
-
 int main()
 {
-   Matrix2f A;
+   Eigen::Matrix2f A;
    A << 1, 2, 2, 3;
-   cout << "Here is the matrix A:\n" << A << endl;
-   SelfAdjointEigenSolver<Matrix2f> eigensolver(A);
-   if (eigensolver.info() != Success) abort();
-   cout << "The eigenvalues of A are:\n" << eigensolver.eigenvalues() << endl;
-   cout << "Here's a matrix whose columns are eigenvectors of A \n"
+   std::cout << "Here is the matrix A:\n" << A << std::endl;
+   Eigen::SelfAdjointEigenSolver<Eigen::Matrix2f> eigensolver(A);
+   if (eigensolver.info() != Eigen::Success) abort();
+   std::cout << "The eigenvalues of A are:\n" << eigensolver.eigenvalues() << std::endl;
+   std::cout << "Here's a matrix whose columns are eigenvectors of A \n"
         << "corresponding to these eigenvalues:\n"
-        << eigensolver.eigenvectors() << endl;
+        << eigensolver.eigenvectors() << std::endl;
 }
diff --git a/doc/examples/TutorialLinAlgSetThreshold.cpp b/doc/examples/TutorialLinAlgSetThreshold.cpp
index 3956b13..e1335e7 100644
--- a/doc/examples/TutorialLinAlgSetThreshold.cpp
+++ b/doc/examples/TutorialLinAlgSetThreshold.cpp
@@ -1,16 +1,13 @@
 #include <iostream>
 #include <Eigen/Dense>
 
-using namespace std;
-using namespace Eigen;
-
 int main()
 {
-   Matrix2d A;
+   Eigen::Matrix2d A;
    A << 2, 1,
         2, 0.9999999999;
-   FullPivLU<Matrix2d> lu(A);
-   cout << "By default, the rank of A is found to be " << lu.rank() << endl;
+   Eigen::FullPivLU<Eigen::Matrix2d> lu(A);
+   std::cout << "By default, the rank of A is found to be " << lu.rank() << std::endl;
    lu.setThreshold(1e-5);
-   cout << "With threshold 1e-5, the rank of A is found to be " << lu.rank() << endl;
+   std::cout << "With threshold 1e-5, the rank of A is found to be " << lu.rank() << std::endl;
 }
diff --git a/doc/examples/Tutorial_ArrayClass_accessors.cpp b/doc/examples/Tutorial_ArrayClass_accessors.cpp
index dc720ff..0db52a3 100644
--- a/doc/examples/Tutorial_ArrayClass_accessors.cpp
+++ b/doc/examples/Tutorial_ArrayClass_accessors.cpp
@@ -1,24 +1,21 @@
 #include <Eigen/Dense>
 #include <iostream>
 
-using namespace Eigen;
-using namespace std;
-
 int main()
 {
-  ArrayXXf  m(2,2);
+  Eigen::ArrayXXf  m(2,2);
   
   // assign some values coefficient by coefficient
   m(0,0) = 1.0; m(0,1) = 2.0;
   m(1,0) = 3.0; m(1,1) = m(0,1) + m(1,0);
   
   // print values to standard output
-  cout << m << endl << endl;
+  std::cout << m << std::endl << std::endl;
  
   // using the comma-initializer is also allowed
   m << 1.0,2.0,
        3.0,4.0;
      
   // print values to standard output
-  cout << m << endl;
+  std::cout << m << std::endl;
 }
diff --git a/doc/examples/Tutorial_ArrayClass_addition.cpp b/doc/examples/Tutorial_ArrayClass_addition.cpp
index 480ffb0..4a407a7 100644
--- a/doc/examples/Tutorial_ArrayClass_addition.cpp
+++ b/doc/examples/Tutorial_ArrayClass_addition.cpp
@@ -1,13 +1,10 @@
 #include <Eigen/Dense>
 #include <iostream>
 
-using namespace Eigen;
-using namespace std;
-
 int main()
 {
-  ArrayXXf a(3,3);
-  ArrayXXf b(3,3);
+  Eigen::ArrayXXf a(3,3);
+  Eigen::ArrayXXf b(3,3);
   a << 1,2,3,
        4,5,6,
        7,8,9;
@@ -16,8 +13,8 @@
        1,2,3;
        
   // Adding two arrays
-  cout << "a + b = " << endl << a + b << endl << endl;
+  std::cout << "a + b = " << std::endl << a + b << std::endl << std::endl;
 
   // Subtracting a scalar from an array
-  cout << "a - 2 = " << endl << a - 2 << endl;
+  std::cout << "a - 2 = " << std::endl << a - 2 << std::endl;
 }
diff --git a/doc/examples/Tutorial_ArrayClass_cwise_other.cpp b/doc/examples/Tutorial_ArrayClass_cwise_other.cpp
index d9046c6..12483f3 100644
--- a/doc/examples/Tutorial_ArrayClass_cwise_other.cpp
+++ b/doc/examples/Tutorial_ArrayClass_cwise_other.cpp
@@ -1,19 +1,16 @@
 #include <Eigen/Dense>
 #include <iostream>
 
-using namespace Eigen;
-using namespace std;
-
 int main()
 {
-  ArrayXf a = ArrayXf::Random(5);
+  Eigen::ArrayXf a = Eigen::ArrayXf::Random(5);
   a *= 2;
-  cout << "a =" << endl 
-       << a << endl;
-  cout << "a.abs() =" << endl 
-       << a.abs() << endl;
-  cout << "a.abs().sqrt() =" << endl 
-       << a.abs().sqrt() << endl;
-  cout << "a.min(a.abs().sqrt()) =" << endl 
-       << a.min(a.abs().sqrt()) << endl;
+  std::cout << "a =" << std::endl
+            << a << std::endl;
+  std::cout << "a.abs() =" << std::endl
+            << a.abs() << std::endl;
+  std::cout << "a.abs().sqrt() =" << std::endl
+            << a.abs().sqrt() << std::endl;
+  std::cout << "a.min(a.abs().sqrt()) =" << std::endl
+            << a.min(a.abs().sqrt()) << std::endl;
 }
diff --git a/doc/examples/Tutorial_ArrayClass_interop.cpp b/doc/examples/Tutorial_ArrayClass_interop.cpp
index 371f070..c9a8352 100644
--- a/doc/examples/Tutorial_ArrayClass_interop.cpp
+++ b/doc/examples/Tutorial_ArrayClass_interop.cpp
@@ -1,8 +1,7 @@
 #include <Eigen/Dense>
 #include <iostream>
 
-using namespace Eigen;
-using namespace std;
+using Eigen::MatrixXf;
 
 int main()
 {
@@ -16,7 +15,7 @@
        7,8;
   
   result = (m.array() + 4).matrix() * m;
-  cout << "-- Combination 1: --" << endl << result << endl << endl;
+  std::cout << "-- Combination 1: --\n" << result << "\n\n";
   result = (m.array() * n.array()).matrix() * m;
-  cout << "-- Combination 2: --" << endl << result << endl << endl;
+  std::cout << "-- Combination 2: --\n" << result << "\n\n";
 }
diff --git a/doc/examples/Tutorial_ArrayClass_interop_matrix.cpp b/doc/examples/Tutorial_ArrayClass_interop_matrix.cpp
index 1014275..07ec9b0 100644
--- a/doc/examples/Tutorial_ArrayClass_interop_matrix.cpp
+++ b/doc/examples/Tutorial_ArrayClass_interop_matrix.cpp
@@ -1,8 +1,7 @@
 #include <Eigen/Dense>
 #include <iostream>
 
-using namespace Eigen;
-using namespace std;
+using Eigen::MatrixXf;
 
 int main()
 {
@@ -16,11 +15,11 @@
        7,8;
 
   result = m * n;
-  cout << "-- Matrix m*n: --" << endl << result << endl << endl;
+  std::cout << "-- Matrix m*n: --\n" << result << "\n\n";
   result = m.array() * n.array();
-  cout << "-- Array m*n: --" << endl << result << endl << endl;
+  std::cout << "-- Array m*n: --\n" << result << "\n\n";
   result = m.cwiseProduct(n);
-  cout << "-- With cwiseProduct: --" << endl << result << endl << endl;
+  std::cout << "-- With cwiseProduct: --\n" << result << "\n\n";
   result = m.array() + 4;
-  cout << "-- Array m + 4: --" << endl << result << endl << endl;
+  std::cout << "-- Array m + 4: --\n" << result << "\n\n";
 }
diff --git a/doc/examples/Tutorial_ArrayClass_mult.cpp b/doc/examples/Tutorial_ArrayClass_mult.cpp
index 6cb439f..bada36c 100644
--- a/doc/examples/Tutorial_ArrayClass_mult.cpp
+++ b/doc/examples/Tutorial_ArrayClass_mult.cpp
@@ -1,16 +1,13 @@
 #include <Eigen/Dense>
 #include <iostream>
 
-using namespace Eigen;
-using namespace std;
-
 int main()
 {
-  ArrayXXf a(2,2);
-  ArrayXXf b(2,2);
+  Eigen::ArrayXXf a(2,2);
+  Eigen::ArrayXXf b(2,2);
   a << 1,2,
        3,4;
   b << 5,6,
        7,8;
-  cout << "a * b = " << endl << a * b << endl;
+  std::cout << "a * b = " << std::endl << a * b << std::endl;
 }
diff --git a/doc/examples/Tutorial_BlockOperations_block_assignment.cpp b/doc/examples/Tutorial_BlockOperations_block_assignment.cpp
index 0b87313..26ad478 100644
--- a/doc/examples/Tutorial_BlockOperations_block_assignment.cpp
+++ b/doc/examples/Tutorial_BlockOperations_block_assignment.cpp
@@ -1,18 +1,15 @@
 #include <Eigen/Dense>
 #include <iostream>
 
-using namespace std;
-using namespace Eigen;
-
 int main()
 {
-  Array22f m;
+  Eigen::Array22f m;
   m << 1,2,
        3,4;
-  Array44f a = Array44f::Constant(0.6);
-  cout << "Here is the array a:" << endl << a << endl << endl;
+  Eigen::Array44f a = Eigen::Array44f::Constant(0.6);
+  std::cout << "Here is the array a:\n" << a << "\n\n";
   a.block<2,2>(1,1) = m;
-  cout << "Here is now a with m copied into its central 2x2 block:" << endl << a << endl << endl;
+  std::cout << "Here is now a with m copied into its central 2x2 block:\n" << a << "\n\n";
   a.block(0,0,2,3) = a.block(2,1,2,3);
-  cout << "Here is now a with bottom-right 2x3 block copied into top-left 2x3 block:" << endl << a << endl << endl;
+  std::cout << "Here is now a with bottom-right 2x3 block copied into top-left 2x3 block:\n" << a << "\n\n";
 }
diff --git a/doc/examples/Tutorial_PartialLU_solve.cpp b/doc/examples/Tutorial_PartialLU_solve.cpp
index a560879..ca72c99 100644
--- a/doc/examples/Tutorial_PartialLU_solve.cpp
+++ b/doc/examples/Tutorial_PartialLU_solve.cpp
@@ -2,17 +2,14 @@
 #include <Eigen/LU>
 #include <iostream>
 
-using namespace std;
-using namespace Eigen;
-
 int main()
 {
-   Matrix3f A;
-   Vector3f b;
+   Eigen::Matrix3f A;
+   Eigen::Vector3f b;
    A << 1,2,3,  4,5,6,  7,8,10;
    b << 3, 3, 4;
-   cout << "Here is the matrix A:" << endl << A << endl;
-   cout << "Here is the vector b:" << endl << b << endl;
-   Vector3f x = A.lu().solve(b);
-   cout << "The solution is:" << endl << x << endl;
+   std::cout << "Here is the matrix A:" << std::endl << A << std::endl;
+   std::cout << "Here is the vector b:" << std::endl << b << std::endl;
+   Eigen::Vector3f x = A.lu().solve(b);
+   std::cout << "The solution is:" << std::endl << x << std::endl;
 }
diff --git a/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_broadcast_1nn.cpp b/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_broadcast_1nn.cpp
index 334b4d8..8ef06be 100644
--- a/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_broadcast_1nn.cpp
+++ b/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_broadcast_1nn.cpp
@@ -1,9 +1,6 @@
 #include <iostream>
 #include <Eigen/Dense>
 
-using namespace std;
-using namespace Eigen;
-
 int main()
 {
   Eigen::MatrixXf m(2,4);
@@ -15,10 +12,10 @@
   v << 2,
        3;
 
-  MatrixXf::Index index;
+  Eigen::Index index;
   // find nearest neighbour
   (m.colwise() - v).colwise().squaredNorm().minCoeff(&index);
 
-  cout << "Nearest neighbour is column " << index << ":" << endl;
-  cout << m.col(index) << endl;
+  std::cout << "Nearest neighbour is column " << index << ":" << std::endl;
+  std::cout << m.col(index) << std::endl;
 }
diff --git a/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_maxnorm.cpp b/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_maxnorm.cpp
index 049c747..b5d88c3 100644
--- a/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_maxnorm.cpp
+++ b/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_maxnorm.cpp
@@ -1,15 +1,13 @@
 #include <iostream>
 #include <Eigen/Dense>
 
-using namespace std;
-using namespace Eigen;
 int main()
 {
-  MatrixXf mat(2,4);
+  Eigen::MatrixXf mat(2,4);
   mat << 1, 2, 6, 9,
          3, 1, 7, 2;
   
-  MatrixXf::Index   maxIndex;
+  Eigen::Index   maxIndex;
   float maxNorm = mat.colwise().sum().maxCoeff(&maxIndex);
   
   std::cout << "Maximum sum at position " << maxIndex << std::endl;
diff --git a/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_reductions_bool.cpp b/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_reductions_bool.cpp
index 0cca37f..7b89bcf 100644
--- a/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_reductions_bool.cpp
+++ b/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_reductions_bool.cpp
@@ -1,21 +1,18 @@
 #include <Eigen/Dense>
 #include <iostream>
 
-using namespace std;
-using namespace Eigen;
-
 int main()
 {
-  ArrayXXf a(2,2);
+  Eigen::ArrayXXf a(2,2);
   
   a << 1,2,
        3,4;
 
-  cout << "(a > 0).all()   = " << (a > 0).all() << endl;
-  cout << "(a > 0).any()   = " << (a > 0).any() << endl;
-  cout << "(a > 0).count() = " << (a > 0).count() << endl;
-  cout << endl;
-  cout << "(a > 2).all()   = " << (a > 2).all() << endl;
-  cout << "(a > 2).any()   = " << (a > 2).any() << endl;
-  cout << "(a > 2).count() = " << (a > 2).count() << endl;
+  std::cout << "(a > 0).all()   = " << (a > 0).all() << std::endl;
+  std::cout << "(a > 0).any()   = " << (a > 0).any() << std::endl;
+  std::cout << "(a > 0).count() = " << (a > 0).count() << std::endl;
+  std::cout << std::endl;
+  std::cout << "(a > 2).all()   = " << (a > 2).all() << std::endl;
+  std::cout << "(a > 2).any()   = " << (a > 2).any() << std::endl;
+  std::cout << "(a > 2).count() = " << (a > 2).count() << std::endl;
 }
diff --git a/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_reductions_norm.cpp b/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_reductions_norm.cpp
index 740439f..7519137 100644
--- a/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_reductions_norm.cpp
+++ b/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_reductions_norm.cpp
@@ -1,13 +1,10 @@
 #include <Eigen/Dense>
 #include <iostream>
 
-using namespace std;
-using namespace Eigen;
-
 int main()
 {
-  VectorXf v(2);
-  MatrixXf m(2,2), n(2,2);
+  Eigen::VectorXf v(2);
+  Eigen::MatrixXf m(2,2), n(2,2);
   
   v << -1,
        2;
@@ -15,14 +12,14 @@
   m << 1,-2,
        -3,4;
 
-  cout << "v.squaredNorm() = " << v.squaredNorm() << endl;
-  cout << "v.norm() = " << v.norm() << endl;
-  cout << "v.lpNorm<1>() = " << v.lpNorm<1>() << endl;
-  cout << "v.lpNorm<Infinity>() = " << v.lpNorm<Infinity>() << endl;
+  std::cout << "v.squaredNorm() = " << v.squaredNorm() << std::endl;
+  std::cout << "v.norm() = " << v.norm() << std::endl;
+  std::cout << "v.lpNorm<1>() = " << v.lpNorm<1>() << std::endl;
+  std::cout << "v.lpNorm<Infinity>() = " << v.lpNorm<Eigen::Infinity>() << std::endl;
 
-  cout << endl;
-  cout << "m.squaredNorm() = " << m.squaredNorm() << endl;
-  cout << "m.norm() = " << m.norm() << endl;
-  cout << "m.lpNorm<1>() = " << m.lpNorm<1>() << endl;
-  cout << "m.lpNorm<Infinity>() = " << m.lpNorm<Infinity>() << endl;
+  std::cout << std::endl;
+  std::cout << "m.squaredNorm() = " << m.squaredNorm() << std::endl;
+  std::cout << "m.norm() = " << m.norm() << std::endl;
+  std::cout << "m.lpNorm<1>() = " << m.lpNorm<1>() << std::endl;
+  std::cout << "m.lpNorm<Infinity>() = " << m.lpNorm<Eigen::Infinity>() << std::endl;
 }
diff --git a/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_reductions_operatornorm.cpp b/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_reductions_operatornorm.cpp
index 62e28fc..8faa5a1 100644
--- a/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_reductions_operatornorm.cpp
+++ b/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_reductions_operatornorm.cpp
@@ -1,18 +1,15 @@
 #include <Eigen/Dense>
 #include <iostream>
 
-using namespace Eigen;
-using namespace std;
-
 int main()
 {
-  MatrixXf m(2,2);
+  Eigen::MatrixXf m(2,2);
   m << 1,-2,
        -3,4;
 
-  cout << "1-norm(m)     = " << m.cwiseAbs().colwise().sum().maxCoeff()
-       << " == "             << m.colwise().lpNorm<1>().maxCoeff() << endl;
+  std::cout << "1-norm(m)     = " << m.cwiseAbs().colwise().sum().maxCoeff()
+            << " == "             << m.colwise().lpNorm<1>().maxCoeff() << std::endl;
 
-  cout << "infty-norm(m) = " << m.cwiseAbs().rowwise().sum().maxCoeff()
-       << " == "             << m.rowwise().lpNorm<1>().maxCoeff() << endl;
+  std::cout << "infty-norm(m) = " << m.cwiseAbs().rowwise().sum().maxCoeff()
+            << " == "             << m.rowwise().lpNorm<1>().maxCoeff() << std::endl;
 }
diff --git a/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_visitors.cpp b/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_visitors.cpp
index b54e9aa..bd294bd 100644
--- a/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_visitors.cpp
+++ b/doc/examples/Tutorial_ReductionsVisitorsBroadcasting_visitors.cpp
@@ -1,9 +1,6 @@
 #include <iostream>
 #include <Eigen/Dense>
 
-using namespace std;
-using namespace Eigen;
-
 int main()
 {
   Eigen::MatrixXf m(2,2);
@@ -12,15 +9,15 @@
        3, 4;
 
   //get location of maximum
-  MatrixXf::Index maxRow, maxCol;
+  Eigen::Index maxRow, maxCol;
   float max = m.maxCoeff(&maxRow, &maxCol);
 
   //get location of minimum
-  MatrixXf::Index minRow, minCol;
+  Eigen::Index minRow, minCol;
   float min = m.minCoeff(&minRow, &minCol);
 
-  cout << "Max: " << max <<  ", at: " <<
-     maxRow << "," << maxCol << endl;
-  cout << "Min: " << min << ", at: " <<
-     minRow << "," << minCol << endl;
+  std::cout << "Max: " << max <<  ", at: " <<
+     maxRow << "," << maxCol << std::endl;
+  std::cout << "Min: " << min << ", at: " <<
+     minRow << "," << minCol << std::endl;
 }
diff --git a/doc/examples/Tutorial_simple_example_dynamic_size.cpp b/doc/examples/Tutorial_simple_example_dynamic_size.cpp
index defcb1e..796bd87 100644
--- a/doc/examples/Tutorial_simple_example_dynamic_size.cpp
+++ b/doc/examples/Tutorial_simple_example_dynamic_size.cpp
@@ -1,13 +1,11 @@
 #include <Eigen/Core>
 #include <iostream>
 
-using namespace Eigen;
-
 int main()
 {
   for (int size=1; size<=4; ++size)
   {
-    MatrixXi m(size,size+1);         // a (size)x(size+1)-matrix of int's
+    Eigen::MatrixXi m(size,size+1);         // a (size)x(size+1)-matrix of int's
     for (int j=0; j<m.cols(); ++j)   // loop over columns
       for (int i=0; i<m.rows(); ++i) // loop over rows
         m(i,j) = i+j*size;           // to access matrix coefficients,
@@ -15,7 +13,7 @@
     std::cout << m << "\n\n";
   }
 
-  VectorXf v(4); // a vector of 4 float's
+  Eigen::VectorXf v(4); // a vector of 4 float's
   // to access vector coefficients, use either operator () or operator []
   v[0] = 1; v[1] = 2; v(2) = 3; v(3) = 4;
   std::cout << "\nv:\n" << v << std::endl;
diff --git a/doc/examples/Tutorial_simple_example_fixed_size.cpp b/doc/examples/Tutorial_simple_example_fixed_size.cpp
index bc4f95d..99a974d 100644
--- a/doc/examples/Tutorial_simple_example_fixed_size.cpp
+++ b/doc/examples/Tutorial_simple_example_fixed_size.cpp
@@ -1,14 +1,12 @@
 #include <Eigen/Core>
 #include <iostream>
 
-using namespace Eigen;
-
 int main()
 {
-  Matrix3f m3;
+  Eigen::Matrix3f m3;
   m3 << 1, 2, 3, 4, 5, 6, 7, 8, 9;
-  Matrix4f m4 = Matrix4f::Identity();
-  Vector4i v4(1, 2, 3, 4);
+  Eigen::Matrix4f m4 = Eigen::Matrix4f::Identity();
+  Eigen::Vector4i v4(1, 2, 3, 4);
 
   std::cout << "m3\n" << m3 << "\nm4:\n"
     << m4 << "\nv4:\n" << v4 << std::endl;
diff --git a/doc/examples/class_Block.cpp b/doc/examples/class_Block.cpp
index ace719a..9ace0da 100644
--- a/doc/examples/class_Block.cpp
+++ b/doc/examples/class_Block.cpp
@@ -1,27 +1,25 @@
 #include <Eigen/Core>
 #include <iostream>
-using namespace Eigen;
-using namespace std;
 
 template<typename Derived>
 Eigen::Block<Derived>
-topLeftCorner(MatrixBase<Derived>& m, int rows, int cols)
+topLeftCorner(Eigen::MatrixBase<Derived>& m, int rows, int cols)
 {
   return Eigen::Block<Derived>(m.derived(), 0, 0, rows, cols);
 }
 
 template<typename Derived>
 const Eigen::Block<const Derived>
-topLeftCorner(const MatrixBase<Derived>& m, int rows, int cols)
+topLeftCorner(const Eigen::MatrixBase<Derived>& m, int rows, int cols)
 {
   return Eigen::Block<const Derived>(m.derived(), 0, 0, rows, cols);
 }
 
 int main(int, char**)
 {
-  Matrix4d m = Matrix4d::Identity();
-  cout << topLeftCorner(4*m, 2, 3) << endl; // calls the const version
+  Eigen::Matrix4d m = Eigen::Matrix4d::Identity();
+  std::cout << topLeftCorner(4*m, 2, 3) << std::endl; // calls the const version
   topLeftCorner(m, 2, 3) *= 5;              // calls the non-const version
-  cout << "Now the matrix m is:" << endl << m << endl;
+  std::cout << "Now the matrix m is:" << std::endl << m << std::endl;
   return 0;
 }
diff --git a/doc/examples/class_CwiseBinaryOp.cpp b/doc/examples/class_CwiseBinaryOp.cpp
index 682af46..aec926dc 100644
--- a/doc/examples/class_CwiseBinaryOp.cpp
+++ b/doc/examples/class_CwiseBinaryOp.cpp
@@ -1,18 +1,18 @@
 #include <Eigen/Core>
 #include <iostream>
-using namespace Eigen;
-using namespace std;
+
+using Eigen::Matrix4d;
 
 // define a custom template binary functor
 template<typename Scalar> struct MakeComplexOp {
   EIGEN_EMPTY_STRUCT_CTOR(MakeComplexOp)
-  typedef complex<Scalar> result_type;
-  complex<Scalar> operator()(const Scalar& a, const Scalar& b) const { return complex<Scalar>(a,b); }
+  typedef std::complex<Scalar> result_type;
+  result_type operator()(const Scalar& a, const Scalar& b) const { return result_type(a,b); }
 };
 
 int main(int, char**)
 {
   Matrix4d m1 = Matrix4d::Random(), m2 = Matrix4d::Random();
-  cout << m1.binaryExpr(m2, MakeComplexOp<double>()) << endl;
+  std::cout << m1.binaryExpr(m2, MakeComplexOp<double>()) << std::endl;
   return 0;
 }
diff --git a/doc/examples/class_CwiseUnaryOp.cpp b/doc/examples/class_CwiseUnaryOp.cpp
index a5fcc15..6c65f2e 100644
--- a/doc/examples/class_CwiseUnaryOp.cpp
+++ b/doc/examples/class_CwiseUnaryOp.cpp
@@ -1,7 +1,5 @@
 #include <Eigen/Core>
 #include <iostream>
-using namespace Eigen;
-using namespace std;
 
 // define a custom template unary functor
 template<typename Scalar>
@@ -13,7 +11,7 @@
 
 int main(int, char**)
 {
-  Matrix4d m1 = Matrix4d::Random();
-  cout << m1 << endl << "becomes: " << endl << m1.unaryExpr(CwiseClampOp<double>(-0.5,0.5)) << endl;
+  Eigen::Matrix4d m1 = Eigen::Matrix4d::Random();
+  std::cout << m1 << std::endl << "becomes: " << std::endl << m1.unaryExpr(CwiseClampOp<double>(-0.5,0.5)) << std::endl;
   return 0;
 }
diff --git a/doc/examples/class_CwiseUnaryOp_ptrfun.cpp b/doc/examples/class_CwiseUnaryOp_ptrfun.cpp
index 36706d8..e97095e 100644
--- a/doc/examples/class_CwiseUnaryOp_ptrfun.cpp
+++ b/doc/examples/class_CwiseUnaryOp_ptrfun.cpp
@@ -1,7 +1,5 @@
 #include <Eigen/Core>
 #include <iostream>
-using namespace Eigen;
-using namespace std;
 
 // define function to be applied coefficient-wise
 double ramp(double x)
@@ -14,7 +12,7 @@
 
 int main(int, char**)
 {
-  Matrix4d m1 = Matrix4d::Random();
-  cout << m1 << endl << "becomes: " << endl << m1.unaryExpr(ptr_fun(ramp)) << endl;
+  Eigen::Matrix4d m1 = Eigen::Matrix4d::Random();
+  std::cout << m1 << std::endl << "becomes: " << std::endl << m1.unaryExpr(std::ptr_fun(ramp)) << std::endl;
   return 0;
 }
diff --git a/doc/examples/class_FixedBlock.cpp b/doc/examples/class_FixedBlock.cpp
index 9978b32..4bb2d44 100644
--- a/doc/examples/class_FixedBlock.cpp
+++ b/doc/examples/class_FixedBlock.cpp
@@ -1,27 +1,25 @@
 #include <Eigen/Core>
 #include <iostream>
-using namespace Eigen;
-using namespace std;
 
 template<typename Derived>
 Eigen::Block<Derived, 2, 2>
-topLeft2x2Corner(MatrixBase<Derived>& m)
+topLeft2x2Corner(Eigen::MatrixBase<Derived>& m)
 {
   return Eigen::Block<Derived, 2, 2>(m.derived(), 0, 0);
 }
 
 template<typename Derived>
 const Eigen::Block<const Derived, 2, 2>
-topLeft2x2Corner(const MatrixBase<Derived>& m)
+topLeft2x2Corner(const Eigen::MatrixBase<Derived>& m)
 {
   return Eigen::Block<const Derived, 2, 2>(m.derived(), 0, 0);
 }
 
 int main(int, char**)
 {
-  Matrix3d m = Matrix3d::Identity();
-  cout << topLeft2x2Corner(4*m) << endl; // calls the const version
+  Eigen::Matrix3d m = Eigen::Matrix3d::Identity();
+  std::cout << topLeft2x2Corner(4*m) << std::endl; // calls the const version
   topLeft2x2Corner(m) *= 2;              // calls the non-const version
-  cout << "Now the matrix m is:" << endl << m << endl;
+  std::cout << "Now the matrix m is:" << std::endl << m << std::endl;
   return 0;
 }
diff --git a/doc/examples/class_FixedReshaped.cpp b/doc/examples/class_FixedReshaped.cpp
index b6d4085..be7069d 100644
--- a/doc/examples/class_FixedReshaped.cpp
+++ b/doc/examples/class_FixedReshaped.cpp
@@ -1,22 +1,20 @@
 #include <Eigen/Core>
 #include <iostream>
-using namespace Eigen;
-using namespace std;
 
 template<typename Derived>
 Eigen::Reshaped<Derived, 4, 2>
-reshape_helper(MatrixBase<Derived>& m)
+reshape_helper(Eigen::MatrixBase<Derived>& m)
 {
   return Eigen::Reshaped<Derived, 4, 2>(m.derived());
 }
 
 int main(int, char**)
 {
-  MatrixXd m(2, 4);
+  Eigen::MatrixXd m(2, 4);
   m << 1, 2, 3, 4,
        5, 6, 7, 8;
-  MatrixXd n = reshape_helper(m);
-  cout << "matrix m is:" << endl << m << endl;
-  cout << "matrix n is:" << endl << n << endl;
+  Eigen::MatrixXd n = reshape_helper(m);
+  std::cout << "matrix m is:" << std::endl << m << std::endl;
+  std::cout << "matrix n is:" << std::endl << n << std::endl;
   return 0;
 }
diff --git a/doc/examples/class_FixedVectorBlock.cpp b/doc/examples/class_FixedVectorBlock.cpp
index c88c9fb..eed3007 100644
--- a/doc/examples/class_FixedVectorBlock.cpp
+++ b/doc/examples/class_FixedVectorBlock.cpp
@@ -1,27 +1,25 @@
 #include <Eigen/Core>
 #include <iostream>
-using namespace Eigen;
-using namespace std;
 
 template<typename Derived>
 Eigen::VectorBlock<Derived, 2>
-firstTwo(MatrixBase<Derived>& v)
+firstTwo(Eigen::MatrixBase<Derived>& v)
 {
   return Eigen::VectorBlock<Derived, 2>(v.derived(), 0);
 }
 
 template<typename Derived>
 const Eigen::VectorBlock<const Derived, 2>
-firstTwo(const MatrixBase<Derived>& v)
+firstTwo(const Eigen::MatrixBase<Derived>& v)
 {
   return Eigen::VectorBlock<const Derived, 2>(v.derived(), 0);
 }
 
 int main(int, char**)
 {
-  Matrix<int,1,6> v; v << 1,2,3,4,5,6;
-  cout << firstTwo(4*v) << endl; // calls the const version
+  Eigen::Matrix<int,1,6> v; v << 1,2,3,4,5,6;
+  std::cout << firstTwo(4*v) << std::endl; // calls the const version
   firstTwo(v) *= 2;              // calls the non-const version
-  cout << "Now the vector v is:" << endl << v << endl;
+  std::cout << "Now the vector v is:" << std::endl << v << std::endl;
   return 0;
 }
diff --git a/doc/examples/class_Reshaped.cpp b/doc/examples/class_Reshaped.cpp
index 18fb454..7219853 100644
--- a/doc/examples/class_Reshaped.cpp
+++ b/doc/examples/class_Reshaped.cpp
@@ -1,23 +1,21 @@
 #include <Eigen/Core>
 #include <iostream>
-using namespace std;
-using namespace Eigen;
 
 template<typename Derived>
-const Reshaped<const Derived>
-reshape_helper(const MatrixBase<Derived>& m, int rows, int cols)
+const Eigen::Reshaped<const Derived>
+reshape_helper(const Eigen::MatrixBase<Derived>& m, int rows, int cols)
 {
-  return Reshaped<const Derived>(m.derived(), rows, cols);
+  return Eigen::Reshaped<const Derived>(m.derived(), rows, cols);
 }
 
 int main(int, char**)
 {
-  MatrixXd m(3, 4);
+  Eigen::MatrixXd m(3, 4);
   m << 1, 4, 7, 10,
        2, 5, 8, 11,
        3, 6, 9, 12;
-  cout << m << endl;
-  Ref<const MatrixXd> n = reshape_helper(m, 2, 6);
-  cout << "Matrix m is:" << endl << m << endl;
-  cout << "Matrix n is:" << endl << n << endl;
+  std::cout << m << std::endl;
+  Eigen::Ref<const Eigen::MatrixXd> n = reshape_helper(m, 2, 6);
+  std::cout << "Matrix m is:" << std::endl << m << std::endl;
+  std::cout << "Matrix n is:" << std::endl << n << std::endl;
 }
diff --git a/doc/examples/class_VectorBlock.cpp b/doc/examples/class_VectorBlock.cpp
index dc213df..5cee147 100644
--- a/doc/examples/class_VectorBlock.cpp
+++ b/doc/examples/class_VectorBlock.cpp
@@ -1,27 +1,25 @@
 #include <Eigen/Core>
 #include <iostream>
-using namespace Eigen;
-using namespace std;
 
 template<typename Derived>
 Eigen::VectorBlock<Derived>
-segmentFromRange(MatrixBase<Derived>& v, int start, int end)
+segmentFromRange(Eigen::MatrixBase<Derived>& v, int start, int end)
 {
   return Eigen::VectorBlock<Derived>(v.derived(), start, end-start);
 }
 
 template<typename Derived>
 const Eigen::VectorBlock<const Derived>
-segmentFromRange(const MatrixBase<Derived>& v, int start, int end)
+segmentFromRange(const Eigen::MatrixBase<Derived>& v, int start, int end)
 {
   return Eigen::VectorBlock<const Derived>(v.derived(), start, end-start);
 }
 
 int main(int, char**)
 {
-  Matrix<int,1,6> v; v << 1,2,3,4,5,6;
-  cout << segmentFromRange(2*v, 2, 4) << endl; // calls the const version
+  Eigen::Matrix<int,1,6> v; v << 1,2,3,4,5,6;
+  std::cout << segmentFromRange(2*v, 2, 4) << std::endl; // calls the const version
   segmentFromRange(v, 1, 3) *= 5;              // calls the non-const version
-  cout << "Now the vector v is:" << endl << v << endl;
+  std::cout << "Now the vector v is:" << std::endl << v << std::endl;
   return 0;
 }
diff --git a/doc/examples/function_taking_eigenbase.cpp b/doc/examples/function_taking_eigenbase.cpp
index 49d94b3..4e1e5a9 100644
--- a/doc/examples/function_taking_eigenbase.cpp
+++ b/doc/examples/function_taking_eigenbase.cpp
@@ -1,9 +1,8 @@
 #include <iostream>
 #include <Eigen/Core>
-using namespace Eigen;
 
 template <typename Derived>
-void print_size(const EigenBase<Derived>& b)
+void print_size(const Eigen::EigenBase<Derived>& b)
 {
   std::cout << "size (rows, cols): " << b.size() << " (" << b.rows()
             << ", " << b.cols() << ")" << std::endl;
@@ -11,7 +10,7 @@
 
 int main()
 {
-    Vector3f v;
+    Eigen::Vector3f v;
     print_size(v);
     // v.asDiagonal() returns a 3x3 diagonal matrix pseudo-expression
     print_size(v.asDiagonal());
diff --git a/doc/examples/function_taking_ref.cpp b/doc/examples/function_taking_ref.cpp
index 162a202..a837e19 100644
--- a/doc/examples/function_taking_ref.cpp
+++ b/doc/examples/function_taking_ref.cpp
@@ -1,19 +1,17 @@
 #include <iostream>
 #include <Eigen/SVD>
-using namespace Eigen;
-using namespace std;
 
-float inv_cond(const Ref<const MatrixXf>& a)
+float inv_cond(const Eigen::Ref<const Eigen::MatrixXf>& a)
 {
-  const VectorXf sing_vals = a.jacobiSvd().singularValues();
+  const Eigen::VectorXf sing_vals = a.jacobiSvd().singularValues();
   return sing_vals(sing_vals.size()-1) / sing_vals(0);
 }
 
 int main()
 {
-  Matrix4f m = Matrix4f::Random();
-  cout << "matrix m:" << endl << m << endl << endl;
-  cout << "inv_cond(m):          " << inv_cond(m)                      << endl;
-  cout << "inv_cond(m(1:3,1:3)): " << inv_cond(m.topLeftCorner(3,3))   << endl;
-  cout << "inv_cond(m+I):        " << inv_cond(m+Matrix4f::Identity()) << endl;
+  Eigen::MatrixXf m = Eigen::MatrixXf::Random(4, 4);
+  std::cout << "matrix m:\n" << m << "\n\n";
+  std::cout << "inv_cond(m):          " << inv_cond(m)                      << "\n";
+  std::cout << "inv_cond(m(1:3,1:3)): " << inv_cond(m.topLeftCorner(3,3))   << "\n";
+  std::cout << "inv_cond(m+I):        " << inv_cond(m+Eigen::MatrixXf::Identity(4, 4)) << "\n";
 }
diff --git a/doc/examples/make_circulant2.cpp b/doc/examples/make_circulant2.cpp
index 95d3dd3..d86a66b 100644
--- a/doc/examples/make_circulant2.cpp
+++ b/doc/examples/make_circulant2.cpp
@@ -1,8 +1,6 @@
 #include <Eigen/Core>
 #include <iostream>
 
-using namespace Eigen;
-
 // [circulant_func]
 template<class ArgType>
 class circulant_functor {
@@ -10,8 +8,8 @@
 public:
   circulant_functor(const ArgType& arg) : m_vec(arg) {}
 
-  const typename ArgType::Scalar& operator() (Index row, Index col) const {
-    Index index = row - col;
+  const typename ArgType::Scalar& operator() (Eigen::Index row, Eigen::Index col) const {
+    Eigen::Index index = row - col;
     if (index < 0) index += m_vec.size();
     return m_vec(index);
   }
@@ -21,10 +19,10 @@
 // [square]
 template<class ArgType>
 struct circulant_helper {
-  typedef Matrix<typename ArgType::Scalar,
+  typedef Eigen::Matrix<typename ArgType::Scalar,
                  ArgType::SizeAtCompileTime,
                  ArgType::SizeAtCompileTime,
-                 ColMajor,
+                 Eigen::ColMajor,
                  ArgType::MaxSizeAtCompileTime,
                  ArgType::MaxSizeAtCompileTime> MatrixType;
 };
@@ -32,7 +30,7 @@
 
 // [makeCirculant]
 template <class ArgType>
-CwiseNullaryOp<circulant_functor<ArgType>, typename circulant_helper<ArgType>::MatrixType>
+Eigen::CwiseNullaryOp<circulant_functor<ArgType>, typename circulant_helper<ArgType>::MatrixType>
 makeCirculant(const Eigen::MatrixBase<ArgType>& arg)
 {
   typedef typename circulant_helper<ArgType>::MatrixType MatrixType;
diff --git a/doc/examples/nullary_indexing.cpp b/doc/examples/nullary_indexing.cpp
index f710c84..38260af 100644
--- a/doc/examples/nullary_indexing.cpp
+++ b/doc/examples/nullary_indexing.cpp
@@ -1,8 +1,6 @@
 #include <Eigen/Core>
 #include <iostream>
 
-using namespace Eigen;
-
 // [functor]
 template<class ArgType, class RowIndexType, class ColIndexType>
 class indexing_functor {
@@ -10,10 +8,10 @@
   const RowIndexType &m_rowIndices;
   const ColIndexType &m_colIndices;
 public:
-  typedef Matrix<typename ArgType::Scalar,
+  typedef Eigen::Matrix<typename ArgType::Scalar,
                  RowIndexType::SizeAtCompileTime,
                  ColIndexType::SizeAtCompileTime,
-                 ArgType::Flags&RowMajorBit?RowMajor:ColMajor,
+                 ArgType::Flags&Eigen::RowMajorBit?Eigen::RowMajor:Eigen::ColMajor,
                  RowIndexType::MaxSizeAtCompileTime,
                  ColIndexType::MaxSizeAtCompileTime> MatrixType;
 
@@ -21,7 +19,7 @@
     : m_arg(arg), m_rowIndices(row_indices), m_colIndices(col_indices)
   {}
 
-  const typename ArgType::Scalar& operator() (Index row, Index col) const {
+  const typename ArgType::Scalar& operator() (Eigen::Index row, Eigen::Index col) const {
     return m_arg(m_rowIndices[row], m_colIndices[col]);
   }
 };
@@ -29,7 +27,7 @@
 
 // [function]
 template <class ArgType, class RowIndexType, class ColIndexType>
-CwiseNullaryOp<indexing_functor<ArgType,RowIndexType,ColIndexType>, typename indexing_functor<ArgType,RowIndexType,ColIndexType>::MatrixType>
+Eigen::CwiseNullaryOp<indexing_functor<ArgType,RowIndexType,ColIndexType>, typename indexing_functor<ArgType,RowIndexType,ColIndexType>::MatrixType>
 mat_indexing(const Eigen::MatrixBase<ArgType>& arg, const RowIndexType& row_indices, const ColIndexType& col_indices)
 {
   typedef indexing_functor<ArgType,RowIndexType,ColIndexType> Func;
@@ -43,8 +41,8 @@
 {
   std::cout << "[main1]\n";
   Eigen::MatrixXi A = Eigen::MatrixXi::Random(4,4);
-  Array3i ri(1,2,1);
-  ArrayXi ci(6); ci << 3,2,1,0,0,2;
+  Eigen::Array3i ri(1,2,1);
+  Eigen::ArrayXi ci(6); ci << 3,2,1,0,0,2;
   Eigen::MatrixXi B = mat_indexing(A, ri, ci);
   std::cout << "A =" << std::endl;
   std::cout << A << std::endl << std::endl;
@@ -56,7 +54,7 @@
   B =  mat_indexing(A, ri+1, ci);
   std::cout << "A(ri+1,ci) =" << std::endl;
   std::cout << B << std::endl << std::endl;
-  B =  mat_indexing(A, ArrayXi::LinSpaced(13,0,12).unaryExpr([](int x){return x%4;}), ArrayXi::LinSpaced(4,0,3));
+  B =  mat_indexing(A, Eigen::ArrayXi::LinSpaced(13,0,12).unaryExpr([](int x){return x%4;}), Eigen::ArrayXi::LinSpaced(4,0,3));
   std::cout << "A(ArrayXi::LinSpaced(13,0,12).unaryExpr([](int x){return x%4;}), ArrayXi::LinSpaced(4,0,3)) =" << std::endl;
   std::cout << B << std::endl << std::endl;
   std::cout << "[main2]\n";
diff --git a/doc/examples/tut_arithmetic_add_sub.cpp b/doc/examples/tut_arithmetic_add_sub.cpp
index e97477b..95162c0 100644
--- a/doc/examples/tut_arithmetic_add_sub.cpp
+++ b/doc/examples/tut_arithmetic_add_sub.cpp
@@ -1,14 +1,12 @@
 #include <iostream>
 #include <Eigen/Dense>
 
-using namespace Eigen;
-
 int main()
 {
-  Matrix2d a;
+  Eigen::Matrix2d a;
   a << 1, 2,
        3, 4;
-  MatrixXd b(2,2);
+  Eigen::MatrixXd b(2,2);
   b << 2, 3,
        1, 4;
   std::cout << "a + b =\n" << a + b << std::endl;
@@ -16,7 +14,7 @@
   std::cout << "Doing a += b;" << std::endl;
   a += b;
   std::cout << "Now a =\n" << a << std::endl;
-  Vector3d v(1,2,3);
-  Vector3d w(1,0,0);
+  Eigen::Vector3d v(1,2,3);
+  Eigen::Vector3d w(1,0,0);
   std::cout << "-v + w - v =\n" << -v + w - v << std::endl;
 }
diff --git a/doc/examples/tut_arithmetic_dot_cross.cpp b/doc/examples/tut_arithmetic_dot_cross.cpp
index 631c9a5..5b0fd1e 100644
--- a/doc/examples/tut_arithmetic_dot_cross.cpp
+++ b/doc/examples/tut_arithmetic_dot_cross.cpp
@@ -1,15 +1,13 @@
 #include <iostream>
 #include <Eigen/Dense>
 
-using namespace Eigen;
-using namespace std;
 int main()
 {
-  Vector3d v(1,2,3);
-  Vector3d w(0,1,2);
+  Eigen::Vector3d v(1,2,3);
+  Eigen::Vector3d w(0,1,2);
 
-  cout << "Dot product: " << v.dot(w) << endl;
+  std::cout << "Dot product: " << v.dot(w) << std::endl;
   double dp = v.adjoint()*w; // automatic conversion of the inner product to a scalar
-  cout << "Dot product via a matrix product: " << dp << endl;
-  cout << "Cross product:\n" << v.cross(w) << endl;
+  std::cout << "Dot product via a matrix product: " << dp << std::endl;
+  std::cout << "Cross product:\n" << v.cross(w) << std::endl;
 }
diff --git a/doc/examples/tut_arithmetic_matrix_mul.cpp b/doc/examples/tut_arithmetic_matrix_mul.cpp
index f213902..c2d5e2d 100644
--- a/doc/examples/tut_arithmetic_matrix_mul.cpp
+++ b/doc/examples/tut_arithmetic_matrix_mul.cpp
@@ -1,13 +1,12 @@
 #include <iostream>
 #include <Eigen/Dense>
 
-using namespace Eigen;
 int main()
 {
-  Matrix2d mat;
+  Eigen::Matrix2d mat;
   mat << 1, 2,
          3, 4;
-  Vector2d u(-1,1), v(2,0);
+  Eigen::Vector2d u(-1,1), v(2,0);
   std::cout << "Here is mat*mat:\n" << mat*mat << std::endl;
   std::cout << "Here is mat*u:\n" << mat*u << std::endl;
   std::cout << "Here is u^T*mat:\n" << u.transpose()*mat << std::endl;
diff --git a/doc/examples/tut_arithmetic_scalar_mul_div.cpp b/doc/examples/tut_arithmetic_scalar_mul_div.cpp
index d5f65b5..0ba8d6b 100644
--- a/doc/examples/tut_arithmetic_scalar_mul_div.cpp
+++ b/doc/examples/tut_arithmetic_scalar_mul_div.cpp
@@ -1,14 +1,12 @@
 #include <iostream>
 #include <Eigen/Dense>
 
-using namespace Eigen;
-
 int main()
 {
-  Matrix2d a;
+  Eigen::Matrix2d a;
   a << 1, 2,
        3, 4;
-  Vector3d v(1,2,3);
+  Eigen::Vector3d v(1,2,3);
   std::cout << "a * 2.5 =\n" << a * 2.5 << std::endl;
   std::cout << "0.1 * v =\n" << 0.1 * v << std::endl;
   std::cout << "Doing v *= 2;" << std::endl;
diff --git a/doc/examples/tut_matrix_coefficient_accessors.cpp b/doc/examples/tut_matrix_coefficient_accessors.cpp
index c2da171..040087c 100644
--- a/doc/examples/tut_matrix_coefficient_accessors.cpp
+++ b/doc/examples/tut_matrix_coefficient_accessors.cpp
@@ -1,17 +1,15 @@
 #include <iostream>
 #include <Eigen/Dense>
 
-using namespace Eigen;
-
 int main()
 {
-  MatrixXd m(2,2);
+  Eigen::MatrixXd m(2,2);
   m(0,0) = 3;
   m(1,0) = 2.5;
   m(0,1) = -1;
   m(1,1) = m(1,0) + m(0,1);
   std::cout << "Here is the matrix m:\n" << m << std::endl;
-  VectorXd v(2);
+  Eigen::VectorXd v(2);
   v(0) = 4;
   v(1) = v(0) - 1;
   std::cout << "Here is the vector v:\n" << v << std::endl;
diff --git a/doc/examples/tut_matrix_resize.cpp b/doc/examples/tut_matrix_resize.cpp
index 0392c3a..aa80cf5 100644
--- a/doc/examples/tut_matrix_resize.cpp
+++ b/doc/examples/tut_matrix_resize.cpp
@@ -1,16 +1,14 @@
 #include <iostream>
 #include <Eigen/Dense>
 
-using namespace Eigen;
-
 int main()
 {
-  MatrixXd m(2,5);
+  Eigen::MatrixXd m(2,5);
   m.resize(4,3);
   std::cout << "The matrix m is of size "
             << m.rows() << "x" << m.cols() << std::endl;
   std::cout << "It has " << m.size() << " coefficients" << std::endl;
-  VectorXd v(2);
+  Eigen::VectorXd v(2);
   v.resize(5);
   std::cout << "The vector v is of size " << v.size() << std::endl;
   std::cout << "As a matrix, v is of size "
diff --git a/doc/examples/tut_matrix_resize_fixed_size.cpp b/doc/examples/tut_matrix_resize_fixed_size.cpp
index dcbdfa7..3df87d2 100644
--- a/doc/examples/tut_matrix_resize_fixed_size.cpp
+++ b/doc/examples/tut_matrix_resize_fixed_size.cpp
@@ -1,11 +1,9 @@
 #include <iostream>
 #include <Eigen/Dense>
 
-using namespace Eigen;
-
 int main()
 {
-  Matrix4d m;
+  Eigen::Matrix4d m;
   m.resize(4,4); // no operation
   std::cout << "The matrix m is of size "
             << m.rows() << "x" << m.cols() << std::endl;
diff --git a/doc/snippets/Slicing_arrayexpr.cpp b/doc/snippets/Slicing_arrayexpr.cpp
index 2df8180..6d09980 100644
--- a/doc/snippets/Slicing_arrayexpr.cpp
+++ b/doc/snippets/Slicing_arrayexpr.cpp
@@ -1,4 +1,4 @@
 ArrayXi ind(5); ind<<4,2,5,5,3;
 MatrixXi A = MatrixXi::Random(4,6);
 cout << "Initial matrix A:\n" << A << "\n\n";
-cout << "A(all,ind-1):\n" << A(all,ind-1) << "\n\n";
+cout << "A(all,ind-1):\n" << A(Eigen::placeholders::all,ind-1) << "\n\n";
diff --git a/doc/snippets/Slicing_rawarray_cxx11.cpp b/doc/snippets/Slicing_rawarray_cxx11.cpp
index 1087131..7a3e6e5 100644
--- a/doc/snippets/Slicing_rawarray_cxx11.cpp
+++ b/doc/snippets/Slicing_rawarray_cxx11.cpp
@@ -1,5 +1,3 @@
-#if EIGEN_HAS_STATIC_ARRAY_TEMPLATE
 MatrixXi A = MatrixXi::Random(4,6);
 cout << "Initial matrix A:\n" << A << "\n\n";
-cout << "A(all,{4,2,5,5,3}):\n" << A(all,{4,2,5,5,3}) << "\n\n";
-#endif
+cout << "A(all,{4,2,5,5,3}):\n" << A(Eigen::placeholders::all,{4,2,5,5,3}) << "\n\n";
diff --git a/doc/snippets/Slicing_stdvector_cxx11.cpp b/doc/snippets/Slicing_stdvector_cxx11.cpp
index 555f662..74f0727 100644
--- a/doc/snippets/Slicing_stdvector_cxx11.cpp
+++ b/doc/snippets/Slicing_stdvector_cxx11.cpp
@@ -1,4 +1,4 @@
 std::vector<int> ind{4,2,5,5,3};
 MatrixXi A = MatrixXi::Random(4,6);
 cout << "Initial matrix A:\n" << A << "\n\n";
-cout << "A(all,ind):\n" << A(all,ind) << "\n\n";
+cout << "A(all,ind):\n" << A(Eigen::placeholders::all,ind) << "\n\n";
diff --git a/doc/special_examples/random_cpp11.cpp b/doc/special_examples/random_cpp11.cpp
index 33744c0..bd73800 100644
--- a/doc/special_examples/random_cpp11.cpp
+++ b/doc/special_examples/random_cpp11.cpp
@@ -2,13 +2,11 @@
 #include <iostream>
 #include <random>
 
-using namespace Eigen;
-
 int main() {
   std::default_random_engine generator;
   std::poisson_distribution<int> distribution(4.1);
   auto poisson = [&] () {return distribution(generator);};
 
-  RowVectorXi v = RowVectorXi::NullaryExpr(10, poisson );
+  Eigen::RowVectorXi v = Eigen::RowVectorXi::NullaryExpr(10, poisson );
   std::cout << v << "\n";
 }
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
index ad09599..c41855a 100644
--- a/test/CMakeLists.txt
+++ b/test/CMakeLists.txt
@@ -303,7 +303,7 @@
   endif()
 endif()
 
-ei_add_test(fastmath " ${EIGEN_FASTMATH_FLAGS} ")
+ei_add_test(fastmath "${EIGEN_FASTMATH_FLAGS}")
 
 # # ei_add_test(denseLM)
 
diff --git a/test/dense_storage.cpp b/test/dense_storage.cpp
index 826874c..398cb32 100644
--- a/test/dense_storage.cpp
+++ b/test/dense_storage.cpp
@@ -13,7 +13,6 @@
 
 #include <Eigen/Core>
 
-#if EIGEN_HAS_TYPE_TRAITS
 using DenseStorageD3x3 = Eigen::DenseStorage<double, 3, 3, 3, 3>;
 static_assert(std::is_trivially_move_constructible<DenseStorageD3x3>::value, "DenseStorage not trivially_move_constructible");
 static_assert(std::is_trivially_move_assignable<DenseStorageD3x3>::value, "DenseStorage not trivially_move_assignable");
@@ -22,7 +21,6 @@
 static_assert(std::is_trivially_copy_assignable<DenseStorageD3x3>::value, "DenseStorage not trivially_copy_assignable");
 static_assert(std::is_trivially_copyable<DenseStorageD3x3>::value, "DenseStorage not trivially_copyable");
 #endif
-#endif
 
 template <typename T, int Size, int Rows, int Cols>
 void dense_storage_copy(int rows, int cols)
@@ -90,8 +88,6 @@
 template<typename T, int Size, std::size_t Alignment>
 void dense_storage_alignment()
 {
-  #if EIGEN_HAS_ALIGNAS
-  
   struct alignas(Alignment) Empty1 {};
   VERIFY_IS_EQUAL(std::alignment_of<Empty1>::value, Alignment);
 
@@ -109,8 +105,6 @@
   VERIFY_IS_EQUAL( (std::alignment_of<Matrix<T,Size,1,AutoAlign> >::value), default_alignment);
   struct Nested2 { Matrix<T,Size,1,AutoAlign> mat; };
   VERIFY_IS_EQUAL(std::alignment_of<Nested2>::value, default_alignment);
-
-  #endif
 }
 
 template<typename T>
diff --git a/test/householder.cpp b/test/householder.cpp
index cad8138..29a6c1d 100644
--- a/test/householder.cpp
+++ b/test/householder.cpp
@@ -30,7 +30,7 @@
 
   typedef Matrix<Scalar, MatrixType::ColsAtCompileTime, MatrixType::RowsAtCompileTime> TMatrixType;
   
-  Matrix<Scalar, EIGEN_SIZE_MAX(MatrixType::RowsAtCompileTime,MatrixType::ColsAtCompileTime), 1> _tmp((std::max)(rows,cols));
+  Matrix<Scalar, internal::max_size_prefer_dynamic(MatrixType::RowsAtCompileTime,MatrixType::ColsAtCompileTime), 1> _tmp((std::max)(rows,cols));
   Scalar* tmp = &_tmp.coeffRef(0,0);
 
   Scalar beta;
diff --git a/test/indexed_view.cpp b/test/indexed_view.cpp
index a3b336c..d8a5532 100644
--- a/test/indexed_view.cpp
+++ b/test/indexed_view.cpp
@@ -7,11 +7,6 @@
 // Public License v. 2.0. If a copy of the MPL was not distributed
 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
-#ifdef EIGEN_TEST_PART_2
-// Make sure we also check c++11 max implementation
-#define EIGEN_MAX_CPP_VER 11
-#endif
-
 #include <valarray>
 #include <vector>
 #include "main.h"
@@ -84,11 +79,7 @@
   ArrayXd a = ArrayXd::LinSpaced(n,0,n-1);
   Array<double,1,Dynamic> b = a.transpose();
 
-  #if EIGEN_COMP_CXXVER>=14
   ArrayXXi A = ArrayXXi::NullaryExpr(n,n, std::ref(encode));
-  #else
-  ArrayXXi A = ArrayXXi::NullaryExpr(n,n, std::ptr_fun(&encode));
-  #endif
 
   for(Index i=0; i<n; ++i)
     for(Index j=0; j<n; ++j)
@@ -299,7 +290,6 @@
 
   VERIFY_IS_APPROX( (A(std::array<int,3>{{1,3,5}}, std::array<int,4>{{9,6,3,0}})), A(seqN(1,3,2), seqN(9,4,-3)) );
 
-#if EIGEN_HAS_STATIC_ARRAY_TEMPLATE
   VERIFY_IS_APPROX( A({3, 1, 6, 5}, all), A(std::array<int,4>{{3, 1, 6, 5}}, all) );
   VERIFY_IS_APPROX( A(all,{3, 1, 6, 5}), A(all,std::array<int,4>{{3, 1, 6, 5}}) );
   VERIFY_IS_APPROX( A({1,3,5},{3, 1, 6, 5}), A(std::array<int,3>{{1,3,5}},std::array<int,4>{{3, 1, 6, 5}}) );
@@ -312,7 +302,6 @@
 
   VERIFY_IS_APPROX( b({3, 1, 6, 5}), b(std::array<int,4>{{3, 1, 6, 5}}) );
   VERIFY_IS_EQUAL( b({1,3,5}).SizeAtCompileTime, 3 );
-#endif
 
   // check mat(i,j) with weird types for i and j
   {
@@ -438,7 +427,6 @@
 {
 //   for(int i = 0; i < g_repeat; i++) {
     CALL_SUBTEST_1( check_indexed_view() );
-    CALL_SUBTEST_2( check_indexed_view() );
 //   }
 
   // static checks of some internals:
diff --git a/test/meta.cpp b/test/meta.cpp
index 7a8b93c..d362c9f 100644
--- a/test/meta.cpp
+++ b/test/meta.cpp
@@ -114,13 +114,7 @@
   // So the following tests are expected to fail with recent compilers.
 
   STATIC_CHECK(( !internal::is_convertible<MyInterface, MyImpl>::value ));
-  #if (!EIGEN_COMP_GNUC_STRICT) || (EIGEN_GNUC_AT_LEAST(4,8))
-  // GCC prior to 4.8 fails to compile this test:
-  // error: cannot allocate an object of abstract type 'MyInterface'
-  // In other word, it does not obey SFINAE.
-  // Nevertheless, we don't really care about supporting abstract type as scalar type!
   STATIC_CHECK(( !internal::is_convertible<MyImpl, MyInterface>::value ));
-  #endif
   STATIC_CHECK((  internal::is_convertible<MyImpl, const MyInterface&>::value ));
 
   #endif
diff --git a/test/packetmath.cpp b/test/packetmath.cpp
index 0600ddb..fcdc2bb 100644
--- a/test/packetmath.cpp
+++ b/test/packetmath.cpp
@@ -48,7 +48,7 @@
 
 template <typename T>
 inline T REF_FREXP(const T& x, T& exp) {
-  int iexp;
+  int iexp = 0;
   EIGEN_USING_STD(frexp)
   const T out = static_cast<T>(frexp(x, &iexp));
   exp = static_cast<T>(iexp);
@@ -713,6 +713,7 @@
   for (int i = 0; i < size; ++i) {
     data1[i] = Scalar(internal::random<double>(-87, 88));
     data2[i] = Scalar(internal::random<double>(-87, 88));
+    data1[0] = -NumTraits<Scalar>::infinity();
   }
   CHECK_CWISE1_IF(PacketTraits::HasExp, std::exp, internal::pexp);
   
diff --git a/test/random_matrix.cpp b/test/random_matrix.cpp
index fb877de..873845f 100644
--- a/test/random_matrix.cpp
+++ b/test/random_matrix.cpp
@@ -82,7 +82,7 @@
     enum {
         Rows = MatrixType::RowsAtCompileTime,
         Cols = MatrixType::ColsAtCompileTime,
-        DiagSize = EIGEN_SIZE_MIN_PREFER_DYNAMIC(Rows, Cols)
+        DiagSize = internal::min_size_prefer_dynamic(Rows, Cols)
     };
     typedef typename MatrixType::Scalar Scalar;
     typedef typename NumTraits<Scalar>::Real RealScalar;
diff --git a/test/ref.cpp b/test/ref.cpp
index 5e7a3b3..d840800 100644
--- a/test/ref.cpp
+++ b/test/ref.cpp
@@ -21,7 +21,7 @@
 // Deal with i387 extended precision
 #if EIGEN_ARCH_i386 && !(EIGEN_ARCH_x86_64)
 
-#if EIGEN_COMP_GNUC_STRICT && EIGEN_GNUC_AT_LEAST(4,4)
+#if EIGEN_COMP_GNUC_STRICT
 #pragma GCC optimize ("-ffloat-store")
 #else
 #undef VERIFY_IS_EQUAL
diff --git a/test/stl_iterators.cpp b/test/stl_iterators.cpp
index 533a3fe..aab3be9 100644
--- a/test/stl_iterators.cpp
+++ b/test/stl_iterators.cpp
@@ -452,10 +452,8 @@
     using VecOp = VectorwiseOp<ArrayXXi, 0>;
     STATIC_CHECK(( internal::is_same<VecOp::const_iterator, decltype(std::declval<const VecOp&>().cbegin())>::value ));
     STATIC_CHECK(( internal::is_same<VecOp::const_iterator, decltype(std::declval<const VecOp&>().cend  ())>::value ));
-    #if EIGEN_COMP_CXXVER>=14
-      STATIC_CHECK(( internal::is_same<VecOp::const_iterator, decltype(std::cbegin(std::declval<const VecOp&>()))>::value ));
-      STATIC_CHECK(( internal::is_same<VecOp::const_iterator, decltype(std::cend  (std::declval<const VecOp&>()))>::value ));
-    #endif
+    STATIC_CHECK(( internal::is_same<VecOp::const_iterator, decltype(std::cbegin(std::declval<const VecOp&>()))>::value ));
+    STATIC_CHECK(( internal::is_same<VecOp::const_iterator, decltype(std::cend  (std::declval<const VecOp&>()))>::value ));
   }
 }
 
diff --git a/test/symbolic_index.cpp b/test/symbolic_index.cpp
index 22ed00c..53eb55d 100644
--- a/test/symbolic_index.cpp
+++ b/test/symbolic_index.cpp
@@ -60,7 +60,6 @@
   VERIFY_IS_EQUAL( ( lastp1-3*last  ).eval(last=size-1), size- 3*(size-1) );
   VERIFY_IS_EQUAL( ( (lastp1-3*last)/lastp1  ).eval(last=size-1), (size- 3*(size-1))/size );
 
-#if EIGEN_HAS_CXX14_VARIABLE_TEMPLATES
   {
     struct x_tag {};  static const symbolic::SymbolExpr<x_tag> x;
     struct y_tag {};  static const symbolic::SymbolExpr<y_tag> y;
@@ -68,7 +67,6 @@
 
     VERIFY_IS_APPROX( int(((x+3)/y+z).eval(x=6,y=3,z=-13)), (6+3)/3+(-13) );
   }
-#endif
 }
 
 EIGEN_DECLARE_TEST(symbolic_index)
diff --git a/test/vectorization_logic.cpp b/test/vectorization_logic.cpp
index 62d3f60..1c086cb 100644
--- a/test/vectorization_logic.cpp
+++ b/test/vectorization_logic.cpp
@@ -245,11 +245,11 @@
             >(InnerVectorizedTraversal,CompleteUnrolling)));
 
     VERIFY((test_assign<
-            Map<Matrix<Scalar,EIGEN_PLAIN_ENUM_MAX(2,PacketSize),EIGEN_PLAIN_ENUM_MAX(2,PacketSize)>, AlignedMax, InnerStride<3*PacketSize> >,
-            Matrix<Scalar,EIGEN_PLAIN_ENUM_MAX(2,PacketSize),EIGEN_PLAIN_ENUM_MAX(2,PacketSize)>
+            Map<Matrix<Scalar, internal::plain_enum_max(2,PacketSize), internal::plain_enum_max(2, PacketSize)>, AlignedMax, InnerStride<3*PacketSize> >,
+            Matrix<Scalar, internal::plain_enum_max(2, PacketSize), internal::plain_enum_max(2, PacketSize)>
             >(DefaultTraversal,PacketSize>=8?InnerUnrolling:CompleteUnrolling)));
 
-    VERIFY((test_assign(Matrix11(), Matrix<Scalar,PacketSize,EIGEN_PLAIN_ENUM_MIN(2,PacketSize)>()*Matrix<Scalar,EIGEN_PLAIN_ENUM_MIN(2,PacketSize),PacketSize>(),
+    VERIFY((test_assign(Matrix11(), Matrix<Scalar,PacketSize, internal::plain_enum_min(2, PacketSize)>()*Matrix<Scalar, internal::plain_enum_min(2, PacketSize),PacketSize>(),
                         InnerVectorizedTraversal, CompleteUnrolling)));
     #endif
 
@@ -407,8 +407,8 @@
     }
 
     VERIFY((test_assign<
-            Map<Matrix<Scalar,EIGEN_PLAIN_ENUM_MAX(2,PacketSize),EIGEN_PLAIN_ENUM_MAX(2,PacketSize)>, AlignedMax, InnerStride<3*PacketSize> >,
-            Matrix<Scalar,EIGEN_PLAIN_ENUM_MAX(2,PacketSize),EIGEN_PLAIN_ENUM_MAX(2,PacketSize)>
+            Map<Matrix<Scalar, plain_enum_max(2,PacketSize), plain_enum_max(2,PacketSize)>, AlignedMax, InnerStride<3*PacketSize> >,
+            Matrix<Scalar, plain_enum_max(2,PacketSize), plain_enum_max(2,PacketSize)>
             >(DefaultTraversal,PacketSize>4?InnerUnrolling:CompleteUnrolling)));
 
     VERIFY((test_assign(Matrix57(), Matrix<Scalar,5*PacketSize,3>()*Matrix<Scalar,3,7>(),
diff --git a/unsupported/Eigen/CXX11/src/Tensor/Tensor.h b/unsupported/Eigen/CXX11/src/Tensor/Tensor.h
index 5a14c71..a3c8c48 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/Tensor.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/Tensor.h
@@ -89,14 +89,12 @@
   protected:
     TensorStorage<Scalar, Dimensions, Options> m_storage;
 
-#ifdef EIGEN_HAS_SFINAE
     template<typename CustomIndices>
     struct isOfNormalIndex{
       static const bool is_array = internal::is_base_of<array<Index, NumIndices>, CustomIndices>::value;
       static const bool is_int = NumTraits<CustomIndices>::IsInteger;
       static const bool value = is_array | is_int;
     };
-#endif
 
   public:
     // Metadata
@@ -113,7 +111,6 @@
     inline Self& base()             { return *this; }
     inline const Self& base() const { return *this; }
 
-#if EIGEN_HAS_VARIADIC_TEMPLATES
     template<typename... IndexTypes>
     EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(Index firstIndex, Index secondIndex, IndexTypes... otherIndices) const
     {
@@ -121,7 +118,6 @@
       EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
       return coeff(array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
     }
-#endif
 
     // normal indices
     EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(const array<Index, NumIndices>& indices) const
@@ -131,7 +127,6 @@
     }
 
     // custom indices
-#ifdef EIGEN_HAS_SFINAE
     template<typename CustomIndices,
              EIGEN_SFINAE_ENABLE_IF( !(isOfNormalIndex<CustomIndices>::value) )
     >
@@ -139,7 +134,6 @@
     {
         return coeff(internal::customIndices2Array<Index,NumIndices>(indices));
     }
-#endif
 
     EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff() const
     {
@@ -153,7 +147,6 @@
       return m_storage.data()[index];
     }
 
-#if EIGEN_HAS_VARIADIC_TEMPLATES
     template<typename... IndexTypes>
     inline Scalar& coeffRef(Index firstIndex, Index secondIndex, IndexTypes... otherIndices)
     {
@@ -161,7 +154,6 @@
       EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
       return coeffRef(array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
     }
-#endif
 
     // normal indices
     EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(const array<Index, NumIndices>& indices)
@@ -171,7 +163,6 @@
     }
 
     // custom indices
-#ifdef EIGEN_HAS_SFINAE
     template<typename CustomIndices,
              EIGEN_SFINAE_ENABLE_IF( !(isOfNormalIndex<CustomIndices>::value) )
              >
@@ -179,7 +170,6 @@
     {
         return coeffRef(internal::customIndices2Array<Index,NumIndices>(indices));
     }
-#endif
 
     EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef()
     {
@@ -193,7 +183,6 @@
       return m_storage.data()[index];
     }
 
-#if EIGEN_HAS_VARIADIC_TEMPLATES
     template<typename... IndexTypes>
     inline const Scalar& operator()(Index firstIndex, Index secondIndex, IndexTypes... otherIndices) const
     {
@@ -201,31 +190,8 @@
       EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
       return this->operator()(array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
     }
-#else
-    EIGEN_DEVICE_FUNC
-    EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1) const
-    {
-      return coeff(array<Index, 2>(i0, i1));
-    }
-    EIGEN_DEVICE_FUNC
-    EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2) const
-    {
-      return coeff(array<Index, 3>(i0, i1, i2));
-    }
-    EIGEN_DEVICE_FUNC
-    EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2, Index i3) const
-    {
-      return coeff(array<Index, 4>(i0, i1, i2, i3));
-    }
-    EIGEN_DEVICE_FUNC
-    EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2, Index i3, Index i4) const
-    {
-      return coeff(array<Index, 5>(i0, i1, i2, i3, i4));
-    }
-#endif
 
     // custom indices
-#ifdef EIGEN_HAS_SFINAE
     template<typename CustomIndices,
              EIGEN_SFINAE_ENABLE_IF( !(isOfNormalIndex<CustomIndices>::value) )
     >
@@ -233,7 +199,6 @@
     {
         return coeff(internal::customIndices2Array<Index,NumIndices>(indices));
     }
-#endif
 
     // normal indices
     EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()(const array<Index, NumIndices>& indices) const
@@ -260,7 +225,6 @@
       return coeff(index);
     }
 
-#if EIGEN_HAS_VARIADIC_TEMPLATES
     template<typename... IndexTypes>
     inline Scalar& operator()(Index firstIndex, Index secondIndex, IndexTypes... otherIndices)
     {
@@ -268,28 +232,6 @@
       EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
       return operator()(array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
     }
-#else
-    EIGEN_DEVICE_FUNC
-    EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1)
-    {
-      return coeffRef(array<Index, 2>(i0, i1));
-    }
-    EIGEN_DEVICE_FUNC
-    EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2)
-    {
-      return coeffRef(array<Index, 3>(i0, i1, i2));
-    }
-    EIGEN_DEVICE_FUNC
-    EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3)
-    {
-      return coeffRef(array<Index, 4>(i0, i1, i2, i3));
-    }
-    EIGEN_DEVICE_FUNC
-    EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3, Index i4)
-    {
-      return coeffRef(array<Index, 5>(i0, i1, i2, i3, i4));
-    }
-#endif
 
     // normal indices
     EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(const array<Index, NumIndices>& indices)
@@ -298,7 +240,6 @@
     }
 
     // custom indices
-#ifdef EIGEN_HAS_SFINAE
     template<typename CustomIndices,
              EIGEN_SFINAE_ENABLE_IF( !(isOfNormalIndex<CustomIndices>::value) )
     >
@@ -306,7 +247,6 @@
     {
       return coeffRef(internal::customIndices2Array<Index,NumIndices>(indices));
     }
-#endif
 
     EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(Index index)
     {
@@ -339,7 +279,6 @@
     {
     }
 
-#if EIGEN_HAS_VARIADIC_TEMPLATES
     template<typename... IndexTypes>
     EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor(Index firstDimension, IndexTypes... otherDimensions)
         : m_storage(firstDimension, otherDimensions...)
@@ -347,33 +286,6 @@
       // The number of dimensions used to construct a tensor must be equal to the rank of the tensor.
       EIGEN_STATIC_ASSERT(sizeof...(otherDimensions) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
     }
-#else
-    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit Tensor(Index dim1)
-      : m_storage(dim1, array<Index, 1>(dim1))
-    {
-      EIGEN_STATIC_ASSERT(1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
-    }
-    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor(Index dim1, Index dim2)
-      : m_storage(dim1*dim2, array<Index, 2>(dim1, dim2))
-    {
-      EIGEN_STATIC_ASSERT(2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
-    }
-    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor(Index dim1, Index dim2, Index dim3)
-      : m_storage(dim1*dim2*dim3, array<Index, 3>(dim1, dim2, dim3))
-    {
-      EIGEN_STATIC_ASSERT(3 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
-    }
-    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor(Index dim1, Index dim2, Index dim3, Index dim4)
-      : m_storage(dim1*dim2*dim3*dim4, array<Index, 4>(dim1, dim2, dim3, dim4))
-    {
-      EIGEN_STATIC_ASSERT(4 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
-    }
-    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor(Index dim1, Index dim2, Index dim3, Index dim4, Index dim5)
-      : m_storage(dim1*dim2*dim3*dim4*dim5, array<Index, 5>(dim1, dim2, dim3, dim4, dim5))
-    {
-      EIGEN_STATIC_ASSERT(5 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
-    }
-#endif
 
     /** Normal Dimension */
     EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit Tensor(const array<Index, NumIndices>& dimensions)
@@ -434,7 +346,6 @@
       return *this;
     }
 
-#if EIGEN_HAS_VARIADIC_TEMPLATES
     template<typename... IndexTypes> EIGEN_DEVICE_FUNC
     void resize(Index firstDimension, IndexTypes... otherDimensions)
     {
@@ -442,7 +353,6 @@
       EIGEN_STATIC_ASSERT(sizeof...(otherDimensions) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
       resize(array<Index, NumIndices>{{firstDimension, otherDimensions...}});
     }
-#endif
 
     /** Normal Dimension */
     EIGEN_DEVICE_FUNC void resize(const array<Index, NumIndices>& dimensions)
@@ -491,7 +401,6 @@
 #endif
 
     /** Custom Dimension */
-#ifdef EIGEN_HAS_SFINAE
     template<typename CustomDimension,
              EIGEN_SFINAE_ENABLE_IF( !(isOfNormalIndex<CustomDimension>::value) )
     >
@@ -499,7 +408,6 @@
     {
       resize(internal::customIndices2Array<Index,NumIndices>(dimensions));
     }
-#endif
 
 #ifndef EIGEN_EMULATE_CXX11_META_H
     template <typename std::ptrdiff_t... Indices>
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorBase.h b/unsupported/Eigen/CXX11/src/Tensor/TensorBase.h
index 9c356f4..945e9fc 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorBase.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorBase.h
@@ -1012,7 +1012,6 @@
       return derived() = this->template random<RandomGenerator>();
     }
 
-#if EIGEN_HAS_VARIADIC_TEMPLATES
     EIGEN_DEVICE_FUNC
     EIGEN_STRONG_INLINE Derived& setValues(
         const typename internal::Initializer<Derived, NumDimensions>::InitList& vals) {
@@ -1020,7 +1019,6 @@
       internal::initialize_tensor<Derived, NumDimensions>(eval, vals);
       return derived();
     }
-#endif  // EIGEN_HAS_VARIADIC_TEMPLATES
 
     template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
     Derived& operator+=(const OtherDerived& other) {
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorDimensions.h b/unsupported/Eigen/CXX11/src/Tensor/TensorDimensions.h
index 6fc2aa8..84ae848 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorDimensions.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorDimensions.h
@@ -111,12 +111,10 @@
   explicit EIGEN_DEVICE_FUNC Sizes(const array<DenseIndex, Base::count>& /*indices*/) {
     // todo: add assertion
   }
-#if EIGEN_HAS_VARIADIC_TEMPLATES
   template <typename... DenseIndex> EIGEN_DEVICE_FUNC Sizes(DenseIndex...) { }
   explicit EIGEN_DEVICE_FUNC Sizes(std::initializer_list<std::ptrdiff_t> /*l*/) {
     // todo: add assertion
   }
-#endif
 
   template <typename T> Sizes& operator = (const T& /*other*/) {
     // add assertion failure if the size of other is different
@@ -173,28 +171,16 @@
   explicit Sizes(const array<DenseIndex, Base::count>& /*indices*/) {
     // todo: add assertion
   }
+
   template <typename T> Sizes& operator = (const T& /*other*/) {
     // add assertion failure if the size of other is different
     return *this;
   }
 
-#if EIGEN_HAS_VARIADIC_TEMPLATES
   template <typename... DenseIndex> Sizes(DenseIndex... /*indices*/) { }
   explicit Sizes(std::initializer_list<std::ptrdiff_t>) {
     // todo: add assertion
   }
-#else
-  EIGEN_DEVICE_FUNC explicit Sizes(const DenseIndex) {
-  }
-  EIGEN_DEVICE_FUNC Sizes(const DenseIndex, const DenseIndex) {
-  }
-  EIGEN_DEVICE_FUNC Sizes(const DenseIndex, const DenseIndex, const DenseIndex) {
-  }
-  EIGEN_DEVICE_FUNC Sizes(const DenseIndex, const DenseIndex, const DenseIndex, const DenseIndex) {
-  }
-  EIGEN_DEVICE_FUNC Sizes(const DenseIndex, const DenseIndex, const DenseIndex, const DenseIndex, const DenseIndex) {
-  }
-#endif
 
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index operator[] (const Index index) const {
     switch (index) {
@@ -337,39 +323,10 @@
   }
 #endif
 
-#if EIGEN_HAS_VARIADIC_TEMPLATES
   template<typename... IndexTypes> EIGEN_DEVICE_FUNC
   EIGEN_STRONG_INLINE explicit DSizes(DenseIndex firstDimension, DenseIndex secondDimension, IndexTypes... otherDimensions) : Base({{firstDimension, secondDimension, otherDimensions...}}) {
     EIGEN_STATIC_ASSERT(sizeof...(otherDimensions) + 2 == NumDims, YOU_MADE_A_PROGRAMMING_MISTAKE)
   }
-#else
-  EIGEN_DEVICE_FUNC DSizes(const DenseIndex i0, const DenseIndex i1) {
-    eigen_assert(NumDims == 2);
-    (*this)[0] = i0;
-    (*this)[1] = i1;
-  }
-  EIGEN_DEVICE_FUNC DSizes(const DenseIndex i0, const DenseIndex i1, const DenseIndex i2) {
-    eigen_assert(NumDims == 3);
-    (*this)[0] = i0;
-    (*this)[1] = i1;
-    (*this)[2] = i2;
-  }
-  EIGEN_DEVICE_FUNC DSizes(const DenseIndex i0, const DenseIndex i1, const DenseIndex i2, const DenseIndex i3) {
-    eigen_assert(NumDims == 4);
-    (*this)[0] = i0;
-    (*this)[1] = i1;
-    (*this)[2] = i2;
-    (*this)[3] = i3;
-  }
-  EIGEN_DEVICE_FUNC DSizes(const DenseIndex i0, const DenseIndex i1, const DenseIndex i2, const DenseIndex i3, const DenseIndex i4) {
-    eigen_assert(NumDims == 5);
-    (*this)[0] = i0;
-    (*this)[1] = i1;
-    (*this)[2] = i2;
-    (*this)[3] = i3;
-    (*this)[4] = i4;
-  }
-#endif
 
   EIGEN_DEVICE_FUNC DSizes& operator = (const array<DenseIndex, NumDims>& other) {
     *static_cast<Base*>(this) = other;
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorFixedSize.h b/unsupported/Eigen/CXX11/src/Tensor/TensorFixedSize.h
index 6b11b79..fe2d7c0 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorFixedSize.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorFixedSize.h
@@ -74,7 +74,6 @@
     inline Self& base()             { return *this; }
     inline const Self& base() const { return *this; }
 
-#if EIGEN_HAS_VARIADIC_TEMPLATES
     template<typename... IndexTypes>
     EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(Index firstIndex, IndexTypes... otherIndices) const
     {
@@ -82,7 +81,6 @@
       EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
       return coeff(array<Index, NumIndices>{{firstIndex, otherIndices...}});
     }
-#endif
 
     EIGEN_DEVICE_FUNC
     EIGEN_STRONG_INLINE const Scalar& coeff(const array<Index, NumIndices>& indices) const
@@ -106,7 +104,6 @@
     }
 
 
-#if EIGEN_HAS_VARIADIC_TEMPLATES
     template<typename... IndexTypes>
     EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index firstIndex, IndexTypes... otherIndices)
     {
@@ -114,7 +111,6 @@
       EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
       return coeffRef(array<Index, NumIndices>{{firstIndex, otherIndices...}});
     }
-#endif
 
     EIGEN_DEVICE_FUNC
     EIGEN_STRONG_INLINE Scalar& coeffRef(const array<Index, NumIndices>& indices)
@@ -137,7 +133,6 @@
       return m_storage.data()[0];
     }
 
-#if EIGEN_HAS_VARIADIC_TEMPLATES
     template<typename... IndexTypes>
     EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()(Index firstIndex, IndexTypes... otherIndices) const
     {
@@ -145,53 +140,6 @@
       EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
       return this->operator()(array<Index, NumIndices>{{firstIndex, otherIndices...}});
     }
-#else
-    EIGEN_DEVICE_FUNC
-    EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1) const
-    {
-      if (Options&RowMajor) {
-        const Index index = i1 + i0 * m_storage.dimensions()[1];
-        return m_storage.data()[index];
-      } else {
-        const Index index = i0 + i1 * m_storage.dimensions()[0];
-        return m_storage.data()[index];
-      }
-    }
-    EIGEN_DEVICE_FUNC
-    EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2) const
-    {
-      if (Options&RowMajor) {
-         const Index index = i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0);
-         return m_storage.data()[index];
-      } else {
-         const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * i2);
-        return m_storage.data()[index];
-      }
-    }
-    EIGEN_DEVICE_FUNC
-    EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2, Index i3) const
-    {
-      if (Options&RowMajor) {
-        const Index index = i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0));
-        return m_storage.data()[index];
-      } else {
-        const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions()[2] * i3));
-        return m_storage.data()[index];
-      }
-    }
-    EIGEN_DEVICE_FUNC
-    EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2, Index i3, Index i4) const
-    {
-      if (Options&RowMajor) {
-        const Index index = i4 + m_storage.dimensions()[4] * (i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0)));
-        return m_storage.data()[index];
-      } else {
-        const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions()[2] * (i3 + m_storage.dimensions()[3] * i4)));
-        return m_storage.data()[index];
-      }
-    }
-#endif
-
 
     EIGEN_DEVICE_FUNC
     EIGEN_STRONG_INLINE const Scalar& operator()(const array<Index, NumIndices>& indices) const
@@ -222,7 +170,6 @@
       return coeff(index);
     }
 
-#if EIGEN_HAS_VARIADIC_TEMPLATES
     template<typename... IndexTypes>
     EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(Index firstIndex, IndexTypes... otherIndices)
     {
@@ -230,52 +177,6 @@
       EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
       return operator()(array<Index, NumIndices>{{firstIndex, otherIndices...}});
     }
-#else
-    EIGEN_DEVICE_FUNC
-    EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1)
-    {
-       if (Options&RowMajor) {
-         const Index index = i1 + i0 * m_storage.dimensions()[1];
-        return m_storage.data()[index];
-      } else {
-        const Index index = i0 + i1 * m_storage.dimensions()[0];
-        return m_storage.data()[index];
-      }
-    }
-    EIGEN_DEVICE_FUNC
-    EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2)
-    {
-       if (Options&RowMajor) {
-         const Index index = i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0);
-        return m_storage.data()[index];
-      } else {
-         const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * i2);
-        return m_storage.data()[index];
-      }
-    }
-    EIGEN_DEVICE_FUNC
-    EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3)
-    {
-      if (Options&RowMajor) {
-        const Index index = i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0));
-        return m_storage.data()[index];
-      } else {
-        const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions()[2] * i3));
-        return m_storage.data()[index];
-      }
-    }
-    EIGEN_DEVICE_FUNC
-    EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3, Index i4)
-    {
-      if (Options&RowMajor) {
-        const Index index = i4 + m_storage.dimensions()[4] * (i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0)));
-        return m_storage.data()[index];
-      } else {
-        const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions()[2] * (i3 + m_storage.dimensions()[3] * i4)));
-        return m_storage.data()[index];
-      }
-    }
-#endif
 
     EIGEN_DEVICE_FUNC
     EIGEN_STRONG_INLINE Scalar& operator()(const array<Index, NumIndices>& indices)
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorIndexList.h b/unsupported/Eigen/CXX11/src/Tensor/TensorIndexList.h
index 170fc6a..e5030e9 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorIndexList.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorIndexList.h
@@ -12,7 +12,7 @@
 
 #include "./InternalHeaderCheck.h"
 
-#if EIGEN_HAS_CONSTEXPR && EIGEN_HAS_VARIADIC_TEMPLATES
+#if EIGEN_HAS_CONSTEXPR
 
 #define EIGEN_HAS_INDEX_LIST
 
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorInitializer.h b/unsupported/Eigen/CXX11/src/Tensor/TensorInitializer.h
index fc177a8..d8d977c 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorInitializer.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorInitializer.h
@@ -10,8 +10,6 @@
 #ifndef EIGEN_CXX11_TENSOR_TENSOR_INITIALIZER_H
 #define EIGEN_CXX11_TENSOR_TENSOR_INITIALIZER_H
 
-#if EIGEN_HAS_VARIADIC_TEMPLATES
-
 #include <initializer_list>
 
 #include "./InternalHeaderCheck.h"
@@ -79,6 +77,4 @@
 }  // namespace internal
 }  // namespace Eigen
 
-#endif  // EIGEN_HAS_VARIADIC_TEMPLATES
-
 #endif  // EIGEN_CXX11_TENSOR_TENSOR_INITIALIZER_H
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorMacros.h b/unsupported/Eigen/CXX11/src/Tensor/TensorMacros.h
index 73ff3d2..f7cd827 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorMacros.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorMacros.h
@@ -26,20 +26,6 @@
  *   void foo(){}
  */
 
-// SFINAE requires variadic templates
-#if !defined(EIGEN_GPUCC)
-#if EIGEN_HAS_VARIADIC_TEMPLATES
-  // SFINAE doesn't work for gcc <= 4.7
-  #ifdef EIGEN_COMP_GNUC
-    #if EIGEN_GNUC_AT_LEAST(4,8)
-      #define EIGEN_HAS_SFINAE
-    #endif
-  #else
-    #define EIGEN_HAS_SFINAE
-  #endif
-#endif
-#endif
-
 #define EIGEN_SFINAE_ENABLE_IF( __condition__ ) \
     typename internal::enable_if< ( __condition__ ) , int >::type = 0
 
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorMap.h b/unsupported/Eigen/CXX11/src/Tensor/TensorMap.h
index 85dade6..7a2bad4 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorMap.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorMap.h
@@ -84,35 +84,11 @@
       EIGEN_STATIC_ASSERT((0 == NumIndices || NumIndices == Dynamic), YOU_MADE_A_PROGRAMMING_MISTAKE)
     }
 
-#if EIGEN_HAS_VARIADIC_TEMPLATES
     template<typename... IndexTypes> EIGEN_DEVICE_FUNC
     EIGEN_STRONG_INLINE TensorMap(StoragePointerType dataPtr, Index firstDimension, IndexTypes... otherDimensions) : m_data(dataPtr), m_dimensions(firstDimension, otherDimensions...) {
       // The number of dimensions used to construct a tensor must be equal to the rank of the tensor.
       EIGEN_STATIC_ASSERT((sizeof...(otherDimensions) + 1 == NumIndices || NumIndices == Dynamic), YOU_MADE_A_PROGRAMMING_MISTAKE)
     }
-#else
-    EIGEN_DEVICE_FUNC
-    EIGEN_STRONG_INLINE TensorMap(StoragePointerType dataPtr, Index firstDimension) : m_data(dataPtr), m_dimensions(firstDimension) {
-      // The number of dimensions used to construct a tensor must be equal to the rank of the tensor.
-      EIGEN_STATIC_ASSERT((1 == NumIndices || NumIndices == Dynamic), YOU_MADE_A_PROGRAMMING_MISTAKE)
-    }
-    EIGEN_DEVICE_FUNC
-    EIGEN_STRONG_INLINE TensorMap(StoragePointerType dataPtr, Index dim1, Index dim2) : m_data(dataPtr), m_dimensions(dim1, dim2) {
-      EIGEN_STATIC_ASSERT(2 == NumIndices || NumIndices == Dynamic, YOU_MADE_A_PROGRAMMING_MISTAKE)
-    }
-    EIGEN_DEVICE_FUNC
-    EIGEN_STRONG_INLINE TensorMap(StoragePointerType dataPtr, Index dim1, Index dim2, Index dim3) : m_data(dataPtr), m_dimensions(dim1, dim2, dim3) {
-      EIGEN_STATIC_ASSERT(3 == NumIndices || NumIndices == Dynamic, YOU_MADE_A_PROGRAMMING_MISTAKE)
-    }
-    EIGEN_DEVICE_FUNC
-    EIGEN_STRONG_INLINE TensorMap(StoragePointerType dataPtr, Index dim1, Index dim2, Index dim3, Index dim4) : m_data(dataPtr), m_dimensions(dim1, dim2, dim3, dim4) {
-      EIGEN_STATIC_ASSERT(4 == NumIndices || NumIndices == Dynamic, YOU_MADE_A_PROGRAMMING_MISTAKE)
-    }
-    EIGEN_DEVICE_FUNC
-    EIGEN_STRONG_INLINE TensorMap(StoragePointerType dataPtr, Index dim1, Index dim2, Index dim3, Index dim4, Index dim5) : m_data(dataPtr), m_dimensions(dim1, dim2, dim3, dim4, dim5) {
-      EIGEN_STATIC_ASSERT(5 == NumIndices || NumIndices == Dynamic, YOU_MADE_A_PROGRAMMING_MISTAKE)
-    }
-#endif
 
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorMap(StoragePointerType dataPtr, const array<Index, NumIndices>& dimensions)
       : m_data(dataPtr), m_dimensions(dimensions)
@@ -167,7 +143,6 @@
       return m_data[index];
     }
 
-#if EIGEN_HAS_VARIADIC_TEMPLATES
     template<typename... IndexTypes> EIGEN_DEVICE_FUNC
     EIGEN_STRONG_INLINE StorageRefType operator()(Index firstIndex, Index secondIndex, IndexTypes... otherIndices) const
     {
@@ -181,52 +156,6 @@
         return m_data[index];
       }
     }
-#else
-    EIGEN_DEVICE_FUNC
-    EIGEN_STRONG_INLINE StorageRefType operator()(Index i0, Index i1) const
-    {
-      if (PlainObjectType::Options&RowMajor) {
-        const Index index = i1 + i0 * m_dimensions[1];
-        return m_data[index];
-      } else {
-        const Index index = i0 + i1 * m_dimensions[0];
-        return m_data[index];
-      }
-    }
-    EIGEN_DEVICE_FUNC
-    EIGEN_STRONG_INLINE StorageRefType operator()(Index i0, Index i1, Index i2) const
-    {
-      if (PlainObjectType::Options&RowMajor) {
-         const Index index = i2 + m_dimensions[2] * (i1 + m_dimensions[1] * i0);
-         return m_data[index];
-      } else {
-         const Index index = i0 + m_dimensions[0] * (i1 + m_dimensions[1] * i2);
-        return m_data[index];
-      }
-    }
-    EIGEN_DEVICE_FUNC
-    EIGEN_STRONG_INLINE StorageRefType operator()(Index i0, Index i1, Index i2, Index i3) const
-    {
-      if (PlainObjectType::Options&RowMajor) {
-        const Index index = i3 + m_dimensions[3] * (i2 + m_dimensions[2] * (i1 + m_dimensions[1] * i0));
-        return m_data[index];
-      } else {
-        const Index index = i0 + m_dimensions[0] * (i1 + m_dimensions[1] * (i2 + m_dimensions[2] * i3));
-        return m_data[index];
-      }
-    }
-    EIGEN_DEVICE_FUNC
-    EIGEN_STRONG_INLINE StorageRefType operator()(Index i0, Index i1, Index i2, Index i3, Index i4) const
-    {
-      if (PlainObjectType::Options&RowMajor) {
-        const Index index = i4 + m_dimensions[4] * (i3 + m_dimensions[3] * (i2 + m_dimensions[2] * (i1 + m_dimensions[1] * i0)));
-        return m_data[index];
-      } else {
-        const Index index = i0 + m_dimensions[0] * (i1 + m_dimensions[1] * (i2 + m_dimensions[2] * (i3 + m_dimensions[3] * i4)));
-        return m_data[index];
-      }
-    }
-#endif
 
     EIGEN_DEVICE_FUNC
     EIGEN_STRONG_INLINE StorageRefType operator()(const array<Index, NumIndices>& indices)
@@ -255,7 +184,6 @@
       return m_data[index];
     }
 
-#if EIGEN_HAS_VARIADIC_TEMPLATES
     template<typename... IndexTypes> EIGEN_DEVICE_FUNC
     EIGEN_STRONG_INLINE StorageRefType operator()(Index firstIndex, Index secondIndex, IndexTypes... otherIndices)
     {
@@ -270,52 +198,6 @@
         return m_data[index];
       }
     }
-#else
-    EIGEN_DEVICE_FUNC
-    EIGEN_STRONG_INLINE StorageRefType operator()(Index i0, Index i1)
-    {
-       if (PlainObjectType::Options&RowMajor) {
-         const Index index = i1 + i0 * m_dimensions[1];
-        return m_data[index];
-      } else {
-        const Index index = i0 + i1 * m_dimensions[0];
-        return m_data[index];
-      }
-    }
-    EIGEN_DEVICE_FUNC
-    EIGEN_STRONG_INLINE StorageRefType operator()(Index i0, Index i1, Index i2)
-    {
-       if (PlainObjectType::Options&RowMajor) {
-         const Index index = i2 + m_dimensions[2] * (i1 + m_dimensions[1] * i0);
-        return m_data[index];
-      } else {
-         const Index index = i0 + m_dimensions[0] * (i1 + m_dimensions[1] * i2);
-        return m_data[index];
-      }
-    }
-    EIGEN_DEVICE_FUNC
-    EIGEN_STRONG_INLINE StorageRefType operator()(Index i0, Index i1, Index i2, Index i3)
-    {
-      if (PlainObjectType::Options&RowMajor) {
-        const Index index = i3 + m_dimensions[3] * (i2 + m_dimensions[2] * (i1 + m_dimensions[1] * i0));
-        return m_data[index];
-      } else {
-        const Index index = i0 + m_dimensions[0] * (i1 + m_dimensions[1] * (i2 + m_dimensions[2] * i3));
-        return m_data[index];
-      }
-    }
-    EIGEN_DEVICE_FUNC
-    EIGEN_STRONG_INLINE StorageRefType operator()(Index i0, Index i1, Index i2, Index i3, Index i4)
-    {
-      if (PlainObjectType::Options&RowMajor) {
-        const Index index = i4 + m_dimensions[4] * (i3 + m_dimensions[3] * (i2 + m_dimensions[2] * (i1 + m_dimensions[1] * i0)));
-        return m_data[index];
-      } else {
-        const Index index = i0 + m_dimensions[0] * (i1 + m_dimensions[1] * (i2 + m_dimensions[2] * (i3 + m_dimensions[3] * i4)));
-        return m_data[index];
-      }
-    }
-#endif
 
     EIGEN_TENSOR_INHERIT_ASSIGNMENT_OPERATORS(TensorMap)
 
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorMeta.h b/unsupported/Eigen/CXX11/src/Tensor/TensorMeta.h
index cf891eb..8b107eb 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorMeta.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorMeta.h
@@ -264,13 +264,12 @@
 };
 
 
-#ifdef EIGEN_HAS_SFINAE
 namespace internal {
 
-  template<typename IndexType, typename Index, Index... Is>
+  template<typename IndexType, typename Index, Index First, Index... Is>
   EIGEN_CONSTEXPR EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
-  array<Index, sizeof...(Is)> customIndices2Array(IndexType& idx, numeric_list<Index, Is...>) {
-    return { idx[Is]... };
+  array<Index, 1 + sizeof...(Is)> customIndices2Array(IndexType& idx, numeric_list<Index, First, Is...>) {
+    return { idx[First], idx[Is]... };
   }
   template<typename IndexType, typename Index>
   EIGEN_CONSTEXPR EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
@@ -308,9 +307,6 @@
   };
 
 }
-#endif
-
-
 
 }  // namespace Eigen
 
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h b/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h
index 2939b98..0342528 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h
@@ -108,7 +108,7 @@
   static const bool value = false;
 };
 
-#if EIGEN_HAS_CONSTEXPR && EIGEN_HAS_VARIADIC_TEMPLATES
+#if EIGEN_HAS_CONSTEXPR
 template <typename ReducedDims, int NumTensorDims>
 struct are_inner_most_dims<ReducedDims, NumTensorDims, ColMajor>{
   static const bool tmp1 = indices_statically_known_to_increase<ReducedDims>();
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorRef.h b/unsupported/Eigen/CXX11/src/Tensor/TensorRef.h
index a2e1af7..67631d2 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorRef.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorRef.h
@@ -206,7 +206,6 @@
       return m_evaluator->coeff(index);
     }
 
-#if EIGEN_HAS_VARIADIC_TEMPLATES
     template<typename... IndexTypes> EIGEN_DEVICE_FUNC
     EIGEN_STRONG_INLINE const Scalar operator()(Index firstIndex, IndexTypes... otherIndices) const
     {
@@ -221,85 +220,6 @@
       const array<Index, num_indices> indices{{firstIndex, otherIndices...}};
       return coeffRef(indices);
     }
-#else
-
-    EIGEN_DEVICE_FUNC
-    EIGEN_STRONG_INLINE const Scalar operator()(Index i0, Index i1) const
-    {
-      array<Index, 2> indices;
-      indices[0] = i0;
-      indices[1] = i1;
-      return coeff(indices);
-    }
-    EIGEN_DEVICE_FUNC
-    EIGEN_STRONG_INLINE const Scalar operator()(Index i0, Index i1, Index i2) const
-    {
-      array<Index, 3> indices;
-      indices[0] = i0;
-      indices[1] = i1;
-      indices[2] = i2;
-      return coeff(indices);
-    }
-    EIGEN_DEVICE_FUNC
-    EIGEN_STRONG_INLINE const Scalar operator()(Index i0, Index i1, Index i2, Index i3) const
-    {
-      array<Index, 4> indices;
-      indices[0] = i0;
-      indices[1] = i1;
-      indices[2] = i2;
-      indices[3] = i3;
-      return coeff(indices);
-    }
-    EIGEN_DEVICE_FUNC
-    EIGEN_STRONG_INLINE const Scalar operator()(Index i0, Index i1, Index i2, Index i3, Index i4) const
-    {
-      array<Index, 5> indices;
-      indices[0] = i0;
-      indices[1] = i1;
-      indices[2] = i2;
-      indices[3] = i3;
-      indices[4] = i4;
-      return coeff(indices);
-    }
-    EIGEN_DEVICE_FUNC
-    EIGEN_STRONG_INLINE Scalar& coeffRef(Index i0, Index i1)
-    {
-      array<Index, 2> indices;
-      indices[0] = i0;
-      indices[1] = i1;
-      return coeffRef(indices);
-    }
-    EIGEN_DEVICE_FUNC
-    EIGEN_STRONG_INLINE Scalar& coeffRef(Index i0, Index i1, Index i2)
-    {
-      array<Index, 3> indices;
-      indices[0] = i0;
-      indices[1] = i1;
-      indices[2] = i2;
-      return coeffRef(indices);
-    }
-    EIGEN_DEVICE_FUNC
-    EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3)
-    {
-      array<Index, 4> indices;
-      indices[0] = i0;
-      indices[1] = i1;
-      indices[2] = i2;
-      indices[3] = i3;
-      return coeffRef(indices);
-    }
-    EIGEN_DEVICE_FUNC
-    EIGEN_STRONG_INLINE Scalar& coeffRef(Index i0, Index i1, Index i2, Index i3, Index i4)
-    {
-      array<Index, 5> indices;
-      indices[0] = i0;
-      indices[1] = i1;
-      indices[2] = i2;
-      indices[3] = i3;
-      indices[4] = i4;
-      return coeffRef(indices);
-    }
-#endif
 
     template <std::size_t NumIndices> EIGEN_DEVICE_FUNC
     EIGEN_STRONG_INLINE const Scalar coeff(const array<Index, NumIndices>& indices) const
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorStorage.h b/unsupported/Eigen/CXX11/src/Tensor/TensorStorage.h
index 2dff543..e79cddf 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorStorage.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorStorage.h
@@ -88,12 +88,10 @@
         : m_data(internal::conditional_aligned_new_auto<T,(Options_&DontAlign)==0>(size)), m_dimensions(dimensions)
       { EIGEN_INTERNAL_TENSOR_STORAGE_CTOR_PLUGIN }
 
-#if EIGEN_HAS_VARIADIC_TEMPLATES
     template <typename... DenseIndex>
     EIGEN_DEVICE_FUNC TensorStorage(DenseIndex... indices) : m_dimensions(indices...) {
       m_data = internal::conditional_aligned_new_auto<T,(Options_&DontAlign)==0>(internal::array_prod(m_dimensions));
     }
-#endif
 
     EIGEN_DEVICE_FUNC TensorStorage(const Self& other)
       : m_data(internal::conditional_aligned_new_auto<T,(Options_&DontAlign)==0>(internal::array_prod(other.m_dimensions)))
diff --git a/unsupported/Eigen/CXX11/src/ThreadPool/ThreadLocal.h b/unsupported/Eigen/CXX11/src/ThreadPool/ThreadLocal.h
index 6bb67e4..4545119 100644
--- a/unsupported/Eigen/CXX11/src/ThreadPool/ThreadLocal.h
+++ b/unsupported/Eigen/CXX11/src/ThreadPool/ThreadLocal.h
@@ -18,9 +18,7 @@
 
 #else
 
-#if ((EIGEN_COMP_GNUC && EIGEN_GNUC_AT_LEAST(4, 8)) || \
-     __has_feature(cxx_thread_local)                || \
-     (EIGEN_COMP_MSVC >= 1900) )
+#if ((EIGEN_COMP_GNUC) || __has_feature(cxx_thread_local) || EIGEN_COMP_MSVC )
 #define EIGEN_THREAD_LOCAL static thread_local
 #endif
 
diff --git a/unsupported/Eigen/CXX11/src/ThreadPool/ThreadYield.h b/unsupported/Eigen/CXX11/src/ThreadPool/ThreadYield.h
index a859c7b..f556ff6 100644
--- a/unsupported/Eigen/CXX11/src/ThreadPool/ThreadYield.h
+++ b/unsupported/Eigen/CXX11/src/ThreadPool/ThreadYield.h
@@ -11,10 +11,6 @@
 #define EIGEN_CXX11_THREADPOOL_THREAD_YIELD_H
 
 // Try to come up with a portable way to yield
-#if EIGEN_COMP_GNUC && EIGEN_GNUC_AT_MOST(4, 7)
-#define EIGEN_THREAD_YIELD() sched_yield()
-#else
 #define EIGEN_THREAD_YIELD() std::this_thread::yield()
-#endif
 
 #endif  // EIGEN_CXX11_THREADPOOL_THREAD_YIELD_H
diff --git a/unsupported/Eigen/CXX11/src/util/EmulateArray.h b/unsupported/Eigen/CXX11/src/util/EmulateArray.h
index f87cb81..a4b1d0c 100644
--- a/unsupported/Eigen/CXX11/src/util/EmulateArray.h
+++ b/unsupported/Eigen/CXX11/src/util/EmulateArray.h
@@ -10,10 +10,8 @@
 #ifndef EIGEN_EMULATE_ARRAY_H
 #define EIGEN_EMULATE_ARRAY_H
 
-// The array class is only available starting with cxx11. Emulate our own here
-// if needed. Beware, msvc still doesn't advertise itself as a c++11 compiler!
-// Moreover, CUDA doesn't support the STL containers, so we use our own instead.
-#if (__cplusplus <= 199711L && EIGEN_COMP_MSVC < 1900) || defined(EIGEN_GPUCC) || defined(EIGEN_AVOID_STL_ARRAY)
+// CUDA doesn't support the STL containers, so we use our own instead.
+#if defined(EIGEN_GPUCC) || defined(EIGEN_AVOID_STL_ARRAY)
 
 namespace Eigen {
 template <typename T, size_t n> class array {
@@ -152,13 +150,11 @@
     values[7] = v8;
   }
 
-#if EIGEN_HAS_VARIADIC_TEMPLATES
   EIGEN_DEVICE_FUNC
   EIGEN_STRONG_INLINE array(std::initializer_list<T> l) {
     eigen_assert(l.size() == n);
     internal::smart_copy(l.begin(), l.end(), values);
   }
-#endif
 };
 
 
@@ -202,12 +198,10 @@
   EIGEN_DEVICE_FUNC
   EIGEN_STRONG_INLINE array() : dummy() { }
 
-#if EIGEN_HAS_VARIADIC_TEMPLATES
   EIGEN_DEVICE_FUNC array(std::initializer_list<T> l) : dummy() {
     EIGEN_UNUSED_VARIABLE(l);
     eigen_assert(l.size() == 0);
   }
-#endif
 
  private:
   T dummy;
diff --git a/unsupported/Eigen/CXX11/src/util/MaxSizeVector.h b/unsupported/Eigen/CXX11/src/util/MaxSizeVector.h
index 277ab14..ca0e3d1 100644
--- a/unsupported/Eigen/CXX11/src/util/MaxSizeVector.h
+++ b/unsupported/Eigen/CXX11/src/util/MaxSizeVector.h
@@ -29,7 +29,7 @@
   */
 template <typename T>
 class MaxSizeVector {
-  static const size_t alignment = EIGEN_PLAIN_ENUM_MAX(EIGEN_ALIGNOF(T), sizeof(void*));
+  static const size_t alignment = internal::plain_enum_max(EIGEN_ALIGNOF(T), sizeof(void*));
  public:
   // Construct a new MaxSizeVector, reserve n elements.
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
diff --git a/unsupported/Eigen/IterativeSolvers b/unsupported/Eigen/IterativeSolvers
index 3db7902..a22d2a3 100644
--- a/unsupported/Eigen/IterativeSolvers
+++ b/unsupported/Eigen/IterativeSolvers
@@ -22,8 +22,10 @@
   *  - a constrained conjugate gradient
   *  - a Householder GMRES implementation
   *  - an IDR(s) implementation
+  *  - a BiCGSTAB(L) implementation
   *  - a DGMRES implementation
   *  - a MINRES implementation
+  *  - an IDRSTABL implementation
   *
   * Choosing the best solver for solving \c A \c x = \c b depends a lot on the preconditioner chosen as well as the properties of \c A. The following flowchart might help you.
   * \dot width=50%
@@ -82,6 +84,8 @@
 #include "src/IterativeSolvers/DGMRES.h"
 #include "src/IterativeSolvers/MINRES.h"
 #include "src/IterativeSolvers/IDRS.h"
+#include "src/IterativeSolvers/BiCGSTABL.h"
+#include "src/IterativeSolvers/IDRSTABL.h"
 
 #include "../../Eigen/src/Core/util/ReenableStupidWarnings.h"
 
diff --git a/unsupported/Eigen/src/AutoDiff/AutoDiffJacobian.h b/unsupported/Eigen/src/AutoDiff/AutoDiffJacobian.h
index 5020f22..6ef6bf4 100644
--- a/unsupported/Eigen/src/AutoDiff/AutoDiffJacobian.h
+++ b/unsupported/Eigen/src/AutoDiff/AutoDiffJacobian.h
@@ -22,17 +22,8 @@
   AutoDiffJacobian(const Functor& f) : Functor(f) {}
 
   // forward constructors
-#if EIGEN_HAS_VARIADIC_TEMPLATES
   template<typename... T>
   AutoDiffJacobian(const T& ...Values) : Functor(Values...) {}
-#else
-  template<typename T0>
-  AutoDiffJacobian(const T0& a0) : Functor(a0) {}
-  template<typename T0, typename T1>
-  AutoDiffJacobian(const T0& a0, const T1& a1) : Functor(a0, a1) {}
-  template<typename T0, typename T1, typename T2>
-  AutoDiffJacobian(const T0& a0, const T1& a1, const T2& a2) : Functor(a0, a1, a2) {}
-#endif
 
   typedef typename Functor::InputType InputType;
   typedef typename Functor::ValueType ValueType;
@@ -52,7 +43,6 @@
   typedef Matrix<ActiveScalar, InputsAtCompileTime, 1> ActiveInput;
   typedef Matrix<ActiveScalar, ValuesAtCompileTime, 1> ActiveValue;
 
-#if EIGEN_HAS_VARIADIC_TEMPLATES
   // Some compilers don't accept variadic parameters after a default parameter,
   // i.e., we can't just write _jac=0 but we need to overload operator():
   EIGEN_STRONG_INLINE
@@ -63,19 +53,12 @@
   template<typename... ParamsType>
   void operator() (const InputType& x, ValueType* v, JacobianType* _jac,
                    const ParamsType&... Params) const
-#else
-  void operator() (const InputType& x, ValueType* v, JacobianType* _jac=0) const
-#endif
   {
     eigen_assert(v!=0);
 
     if (!_jac)
     {
-#if EIGEN_HAS_VARIADIC_TEMPLATES
       Functor::operator()(x, v, Params...);
-#else
-      Functor::operator()(x, v);
-#endif
       return;
     }
 
@@ -91,11 +74,7 @@
     for (Index i=0; i<jac.cols(); i++)
       ax[i].derivatives() = DerivativeType::Unit(x.rows(),i);
 
-#if EIGEN_HAS_VARIADIC_TEMPLATES
     Functor::operator()(ax, &av, Params...);
-#else
-    Functor::operator()(ax, &av);
-#endif
 
     for (Index i=0; i<jac.rows(); i++)
     {
diff --git a/unsupported/Eigen/src/IterativeSolvers/BiCGSTABL.h b/unsupported/Eigen/src/IterativeSolvers/BiCGSTABL.h
new file mode 100755
index 0000000..141d705
--- /dev/null
+++ b/unsupported/Eigen/src/IterativeSolvers/BiCGSTABL.h
@@ -0,0 +1,339 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2020 Chris Schoutrop <c.e.m.schoutrop@tue.nl>
+// Copyright (C) 2020 Jens Wehner <j.wehner@esciencecenter.nl>
+// Copyright (C) 2020 Jan van Dijk <j.v.dijk@tue.nl>
+// Copyright (C) 2020 Adithya Vijaykumar
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+/*
+
+  This implementation of BiCGStab(L) is based on the papers
+      General algorithm:
+      1. G.L.G. Sleijpen, D.R. Fokkema. (1993). BiCGstab(l) for linear equations
+  involving unsymmetric matrices with complex spectrum. Electronic Transactions
+  on Numerical Analysis. Polynomial step update:
+      2. G.L.G. Sleijpen, M.B. Van Gijzen. (2010) Exploiting BiCGstab(l)
+  strategies to induce dimension reduction SIAM Journal on Scientific Computing.
+      3. Fokkema, Diederik R. Enhanced implementation of BiCGstab (l) for
+  solving linear systems of equations. Universiteit Utrecht. Mathematisch
+  Instituut, 1996
+      4. Sleijpen, G. L., & van der Vorst, H. A. (1996). Reliable updated
+  residuals in hybrid Bi-CG methods. Computing, 56(2), 141-163.
+*/
+
+#ifndef EIGEN_BICGSTABL_H
+#define EIGEN_BICGSTABL_H
+
+namespace Eigen {
+
+namespace internal {
+/**     \internal Low-level bi conjugate gradient stabilized algorithm with L
+   additional residual minimization steps.
+   \param mat The matrix A. \param rhs The right hand side vector b.
+   \param x On input an initial solution, on output the computed solution.
+   \param precond A preconditioner able to efficiently solve for an
+   approximation of Ax=b (regardless of b). \param iters On input the max
+   number of iterations, on output the number of performed iterations.
+   \param tol_error On input the tolerance error, on output an estimation
+   of the relative error. \param L Number of additional GMRES steps to
+   take; if L is too large (~20) instabilities occur. \return false in the
+   case of numerical issue, for example a break down of BiCGSTABL. */
+template <typename MatrixType, typename Rhs, typename Dest, typename Preconditioner>
+bool bicgstabl(const MatrixType &mat, const Rhs &rhs, Dest &x, const Preconditioner &precond, Index &iters,
+               typename Dest::RealScalar &tol_error, Index L) {
+  using numext::abs;
+  using numext::sqrt;
+  typedef typename Dest::RealScalar RealScalar;
+  typedef typename Dest::Scalar Scalar;
+  const Index N = rhs.size();
+  L = L < x.rows() ? L : x.rows();
+
+  Index k = 0;
+
+  const RealScalar tol = tol_error;
+  const Index maxIters = iters;
+
+  typedef Matrix<Scalar, Dynamic, 1> VectorType;
+  typedef Matrix<Scalar, Dynamic, Dynamic, ColMajor> DenseMatrixType;
+
+  DenseMatrixType rHat(N, L + 1);
+  DenseMatrixType uHat(N, L + 1);
+
+  // We start with an initial guess x_0 and let us set r_0 as (residual
+  // calculated from x_0)
+  VectorType x0 = x;
+  rHat.col(0) = rhs - mat * x0;  // r_0
+
+  x.setZero();  // This will contain the updates to the solution.
+  // rShadow is arbitrary, but must never be orthogonal to any residual.
+  VectorType rShadow = VectorType::Random(N);
+
+  VectorType x_prime = x;
+
+  // Redundant: x is already set to 0
+  // x.setZero();
+  VectorType b_prime = rHat.col(0);
+
+  // Other vectors and scalars initialization
+  Scalar rho0 = 1.0;
+  Scalar alpha = 0.0;
+  Scalar omega = 1.0;
+
+  uHat.col(0).setZero();
+
+  bool bicg_convergence = false;
+
+  const RealScalar normb = rhs.stableNorm();
+  if (internal::isApprox(normb, RealScalar(0))) {
+    x.setZero();
+    iters = 0;
+    return true;
+  }
+  RealScalar normr = rHat.col(0).stableNorm();
+  RealScalar Mx = normr;
+  RealScalar Mr = normr;
+
+  // Keep track of the solution with the lowest residual
+  RealScalar normr_min = normr;
+  VectorType x_min = x_prime + x;
+
+  // Criterion for when to apply the group-wise update, as per ref. 3.
+  const RealScalar delta = 0.01;
+
+  bool compute_res = false;
+  bool update_app = false;
+
+  while (normr > tol * normb && k < maxIters) {
+    rho0 *= -omega;
+
+    for (Index j = 0; j < L; ++j) {
+      const Scalar rho1 = rShadow.dot(rHat.col(j));
+
+      if (!(numext::isfinite)(rho1) || rho0 == RealScalar(0.0)) {
+        // We cannot continue computing, return the best solution found.
+        x += x_prime;
+
+        // Check if x is better than the best stored solution thus far.
+        normr = (rhs - mat * (precond.solve(x) + x0)).stableNorm();
+
+        if (normr > normr_min || !(numext::isfinite)(normr)) {
+          // x_min is a better solution than x, return x_min
+          x = x_min;
+          normr = normr_min;
+        }
+        tol_error = normr / normb;
+        iters = k;
+        // x contains the updates to x0, add those back to obtain the solution
+        x = precond.solve(x);
+        x += x0;
+        return (normr < tol * normb);
+      }
+
+      const Scalar beta = alpha * (rho1 / rho0);
+      rho0 = rho1;
+      // Update search directions
+      uHat.leftCols(j + 1) = rHat.leftCols(j + 1) - beta * uHat.leftCols(j + 1);
+      uHat.col(j + 1) = mat * precond.solve(uHat.col(j));
+      const Scalar sigma = rShadow.dot(uHat.col(j + 1));
+      alpha = rho1 / sigma;
+      // Update residuals
+      rHat.leftCols(j + 1) -= alpha * uHat.middleCols(1, j + 1);
+      rHat.col(j + 1) = mat * precond.solve(rHat.col(j));
+      // Complete BiCG iteration by updating x
+      x += alpha * uHat.col(0);
+      normr = rHat.col(0).stableNorm();
+      // Check for early exit
+      if (normr < tol * normb) {
+        /*
+          Convergence was achieved during BiCG step.
+          Without this check BiCGStab(L) fails for trivial matrices, such as
+          when the preconditioner already is the inverse, or the input matrix is
+          identity.
+        */
+        bicg_convergence = true;
+        break;
+      } else if (normr < normr_min) {
+        // We found an x with lower residual, keep this one.
+        x_min = x + x_prime;
+        normr_min = normr;
+      }
+    }
+    if (!bicg_convergence) {
+      /*
+        The polynomial/minimize residual step.
+
+        QR Householder method for argmin is more stable than (modified)
+        Gram-Schmidt, in the sense that there is less loss of orthogonality. It
+        is more accurate than solving the normal equations, since the normal
+        equations scale with condition number squared.
+      */
+      const VectorType gamma = rHat.rightCols(L).householderQr().solve(rHat.col(0));
+      x += rHat.leftCols(L) * gamma;
+      rHat.col(0) -= rHat.rightCols(L) * gamma;
+      uHat.col(0) -= uHat.rightCols(L) * gamma;
+      normr = rHat.col(0).stableNorm();
+      omega = gamma(L - 1);
+    }
+    if (normr < normr_min) {
+      // We found an x with lower residual, keep this one.
+      x_min = x + x_prime;
+      normr_min = normr;
+    }
+
+    k++;
+
+    /*
+      Reliable update part
+
+      The recursively computed residual can deviate from the actual residual
+      after several iterations. However, computing the residual from the
+      definition costs extra MVs and should not be done at each iteration. The
+      reliable update strategy computes the true residual from the definition:
+      r=b-A*x at strategic intervals. Furthermore a "group wise update" strategy
+      is used to combine updates, which improves accuracy.
+    */
+
+    // Maximum norm of residuals since last update of x.
+    Mx = numext::maxi(Mx, normr);
+    // Maximum norm of residuals since last computation of the true residual.
+    Mr = numext::maxi(Mr, normr);
+
+    if (normr < delta * normb && normb <= Mx) {
+      update_app = true;
+    }
+
+    if (update_app || (normr < delta * Mr && normb <= Mr)) {
+      compute_res = true;
+    }
+
+    if (bicg_convergence) {
+      update_app = true;
+      compute_res = true;
+      bicg_convergence = false;
+    }
+
+    if (compute_res) {
+      // Explicitly compute residual from the definition
+
+      // This is equivalent to the shifted version of rhs - mat *
+      // (precond.solve(x)+x0)
+      rHat.col(0) = b_prime - mat * precond.solve(x);
+      normr = rHat.col(0).stableNorm();
+      Mr = normr;
+
+      if (update_app) {
+        // After the group wise update, the original problem is translated to a
+        // shifted one.
+        x_prime += x;
+        x.setZero();
+        b_prime = rHat.col(0);
+        Mx = normr;
+      }
+    }
+    if (normr < normr_min) {
+      // We found an x with lower residual, keep this one.
+      x_min = x + x_prime;
+      normr_min = normr;
+    }
+
+    compute_res = false;
+    update_app = false;
+  }
+
+  // Convert internal variable to the true solution vector x
+  x += x_prime;
+
+  normr = (rhs - mat * (precond.solve(x) + x0)).stableNorm();
+  if (normr > normr_min || !(numext::isfinite)(normr)) {
+    // x_min is a better solution than x, return x_min
+    x = x_min;
+    normr = normr_min;
+  }
+  tol_error = normr / normb;
+  iters = k;
+
+  // x contains the updates to x0, add those back to obtain the solution
+  x = precond.solve(x);
+  x += x0;
+  return true;
+}
+
+}  // namespace internal
+
+template <typename MatrixType_, typename Preconditioner_ = DiagonalPreconditioner<typename MatrixType_::Scalar>>
+class BiCGSTABL;
+
+namespace internal {
+
+template <typename MatrixType_, typename Preconditioner_>
+struct traits<Eigen::BiCGSTABL<MatrixType_, Preconditioner_>> {
+  typedef MatrixType_ MatrixType;
+  typedef Preconditioner_ Preconditioner;
+};
+
+}  // namespace internal
+
+template <typename MatrixType_, typename Preconditioner_>
+class BiCGSTABL : public IterativeSolverBase<BiCGSTABL<MatrixType_, Preconditioner_>> {
+  typedef IterativeSolverBase<BiCGSTABL> Base;
+  using Base::m_error;
+  using Base::m_info;
+  using Base::m_isInitialized;
+  using Base::m_iterations;
+  using Base::matrix;
+  Index m_L;
+
+ public:
+  typedef MatrixType_ MatrixType;
+  typedef typename MatrixType::Scalar Scalar;
+  typedef typename MatrixType::RealScalar RealScalar;
+  typedef Preconditioner_ Preconditioner;
+
+  /** Default constructor. */
+  BiCGSTABL() : m_L(2) {}
+
+  /**
+  Initialize the solver with matrix \a A for further \c Ax=b solving.
+
+  This constructor is a shortcut for the default constructor followed
+  by a call to compute().
+
+  \warning this class stores a reference to the matrix A as well as some
+  precomputed values that depend on it. Therefore, if \a A is changed
+  this class becomes invalid. Call compute() to update it with the new
+  matrix A, or modify a copy of A.
+  */
+  template <typename MatrixDerived>
+  explicit BiCGSTABL(const EigenBase<MatrixDerived> &A) : Base(A.derived()), m_L(2) {}
+
+  /** \internal */
+  /** Loops over the number of columns of b and does the following:
+    1. sets the tolerance and maxIterations
+    2. Calls the function that has the core solver routine
+  */
+  template <typename Rhs, typename Dest>
+  void _solve_vector_with_guess_impl(const Rhs &b, Dest &x) const {
+    m_iterations = Base::maxIterations();
+
+    m_error = Base::m_tolerance;
+
+    bool ret = internal::bicgstabl(matrix(), b, x, Base::m_preconditioner, m_iterations, m_error, m_L);
+    m_info = (!ret) ? NumericalIssue : m_error <= Base::m_tolerance ? Success : NoConvergence;
+  }
+
+  /** Sets the parameter L, indicating how many minimize residual steps are
+   * used. Default: 2 */
+  void setL(Index L) {
+    eigen_assert(L >= 1 && "L needs to be positive");
+    m_L = L;
+  }
+};
+
+}  // namespace Eigen
+
+#endif /* EIGEN_BICGSTABL_H */
diff --git a/unsupported/Eigen/src/IterativeSolvers/IDRS.h b/unsupported/Eigen/src/IterativeSolvers/IDRS.h
index 63d7cb8..2c7d7b0 100755
--- a/unsupported/Eigen/src/IterativeSolvers/IDRS.h
+++ b/unsupported/Eigen/src/IterativeSolvers/IDRS.h
@@ -9,429 +9,388 @@
 // Public License v. 2.0. If a copy of the MPL was not distributed
 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
-
 #ifndef EIGEN_IDRS_H
 #define EIGEN_IDRS_H
 
 #include "./InternalHeaderCheck.h"
 
-namespace Eigen
-{
+namespace Eigen {
 
-	namespace internal
-	{
-		/**     \internal Low-level Induced Dimension Reduction algorithm
-		        \param A The matrix A
-		        \param b The right hand side vector b
-		        \param x On input and initial solution, on output the computed solution.
-		        \param precond A preconditioner being able to efficiently solve for an
-		                  approximation of Ax=b (regardless of b)
-		        \param iter On input the max number of iteration, on output the number of performed iterations.
-		        \param relres On input the tolerance error, on output an estimation of the relative error.
-		        \param S On input Number of the dimension of the shadow space.
-				\param smoothing switches residual smoothing on.
-				\param angle small omega lead to faster convergence at the expense of numerical stability
-				\param replacement switches on a residual replacement strategy to increase accuracy of residual at the expense of more Mat*vec products
-		        \return false in the case of numerical issue, for example a break down of IDRS.
-		*/
-		template<typename Vector, typename RealScalar>
-		typename Vector::Scalar omega(const Vector& t, const Vector& s, RealScalar angle)
-		{
-			using numext::abs;
-			typedef typename Vector::Scalar Scalar;
-			const RealScalar ns = s.norm();
-			const RealScalar nt = t.norm();
-			const Scalar ts = t.dot(s);
-			const RealScalar rho = abs(ts / (nt * ns));
+namespace internal {
+/**     \internal Low-level Induced Dimension Reduction algorithm
+        \param A The matrix A
+        \param b The right hand side vector b
+        \param x On input and initial solution, on output the computed solution.
+        \param precond A preconditioner being able to efficiently solve for an
+                  approximation of Ax=b (regardless of b)
+        \param iter On input the max number of iteration, on output the number of performed iterations.
+        \param relres On input the tolerance error, on output an estimation of the relative error.
+        \param S On input Number of the dimension of the shadow space.
+                \param smoothing switches residual smoothing on.
+                \param angle small omega lead to faster convergence at the expense of numerical stability
+                \param replacement switches on a residual replacement strategy to increase accuracy of residual at the
+   expense of more Mat*vec products \return false in the case of numerical issue, for example a break down of IDRS.
+*/
+template <typename Vector, typename RealScalar>
+typename Vector::Scalar omega(const Vector& t, const Vector& s, RealScalar angle) {
+  using numext::abs;
+  typedef typename Vector::Scalar Scalar;
+  const RealScalar ns = s.stableNorm();
+  const RealScalar nt = t.stableNorm();
+  const Scalar ts = t.dot(s);
+  const RealScalar rho = abs(ts / (nt * ns));
 
-			if (rho < angle) {
-				if (ts == Scalar(0)) {
-					return Scalar(0);
-				}
-				// Original relation for om is given by
-				// om = om * angle / rho;
-				// To alleviate potential (near) division by zero this can be rewritten as
-				// om = angle * (ns / nt) * (ts / abs(ts)) = angle * (ns / nt) * sgn(ts)
-  				return angle * (ns / nt) * (ts / abs(ts));
-			}
-			return ts / (nt * nt);
-		}
+  if (rho < angle) {
+    if (ts == Scalar(0)) {
+      return Scalar(0);
+    }
+    // Original relation for om is given by
+    // om = om * angle / rho;
+    // To alleviate potential (near) division by zero this can be rewritten as
+    // om = angle * (ns / nt) * (ts / abs(ts)) = angle * (ns / nt) * sgn(ts)
+    return angle * (ns / nt) * (ts / abs(ts));
+  }
+  return ts / (nt * nt);
+}
 
-		template <typename MatrixType, typename Rhs, typename Dest, typename Preconditioner>
-		bool idrs(const MatrixType& A, const Rhs& b, Dest& x, const Preconditioner& precond,
-			Index& iter,
-			typename Dest::RealScalar& relres, Index S, bool smoothing, typename Dest::RealScalar angle, bool replacement)
-		{
-			typedef typename Dest::RealScalar RealScalar;
-			typedef typename Dest::Scalar Scalar;
-			typedef Matrix<Scalar, Dynamic, 1> VectorType;
-			typedef Matrix<Scalar, Dynamic, Dynamic, ColMajor> DenseMatrixType;
-			const Index N = b.size();
-			S = S < x.rows() ? S : x.rows();
-			const RealScalar tol = relres;
-			const Index maxit = iter;
+template <typename MatrixType, typename Rhs, typename Dest, typename Preconditioner>
+bool idrs(const MatrixType& A, const Rhs& b, Dest& x, const Preconditioner& precond, Index& iter,
+          typename Dest::RealScalar& relres, Index S, bool smoothing, typename Dest::RealScalar angle,
+          bool replacement) {
+  typedef typename Dest::RealScalar RealScalar;
+  typedef typename Dest::Scalar Scalar;
+  typedef Matrix<Scalar, Dynamic, 1> VectorType;
+  typedef Matrix<Scalar, Dynamic, Dynamic, ColMajor> DenseMatrixType;
+  const Index N = b.size();
+  S = S < x.rows() ? S : x.rows();
+  const RealScalar tol = relres;
+  const Index maxit = iter;
 
-			Index replacements = 0;
-			bool trueres = false;
+  Index replacements = 0;
+  bool trueres = false;
 
-			FullPivLU<DenseMatrixType> lu_solver;
+  FullPivLU<DenseMatrixType> lu_solver;
 
-			DenseMatrixType P;
-			{
-				HouseholderQR<DenseMatrixType> qr(DenseMatrixType::Random(N, S));
-			    P = (qr.householderQ() * DenseMatrixType::Identity(N, S));
-			}
+  DenseMatrixType P;
+  {
+    HouseholderQR<DenseMatrixType> qr(DenseMatrixType::Random(N, S));
+    P = (qr.householderQ() * DenseMatrixType::Identity(N, S));
+  }
 
-			const RealScalar normb = b.norm();
+  const RealScalar normb = b.stableNorm();
 
-			if (internal::isApprox(normb, RealScalar(0)))
-			{
-				//Solution is the zero vector
-				x.setZero();
-				iter = 0;
-				relres = 0;
-				return true;
-			}
-			 // from http://homepage.tudelft.nl/1w5b5/IDRS/manual.pdf
-			 // A peak in the residual is considered dangerously high if‖ri‖/‖b‖> C(tol/epsilon).
-			 // With epsilon the
-             // relative machine precision. The factor tol/epsilon corresponds to the size of a
-             // finite precision number that is so large that the absolute round-off error in
-             // this number, when propagated through the process, makes it impossible to
-             // achieve the required accuracy.The factor C accounts for the accumulation of
-             // round-off errors. This parameter has beenset to 10−3.
-			 // mp is epsilon/C
-			 // 10^3 * eps is very conservative, so normally no residual replacements will take place. 
-			 // It only happens if things go very wrong. Too many restarts may ruin the convergence.
-			const RealScalar mp = RealScalar(1e3) * NumTraits<Scalar>::epsilon();
+  if (internal::isApprox(normb, RealScalar(0))) {
+    // Solution is the zero vector
+    x.setZero();
+    iter = 0;
+    relres = 0;
+    return true;
+  }
+  // from http://homepage.tudelft.nl/1w5b5/IDRS/manual.pdf
+  // A peak in the residual is considered dangerously high if‖ri‖/‖b‖> C(tol/epsilon).
+  // With epsilon the
+  // relative machine precision. The factor tol/epsilon corresponds to the size of a
+  // finite precision number that is so large that the absolute round-off error in
+  // this number, when propagated through the process, makes it impossible to
+  // achieve the required accuracy.The factor C accounts for the accumulation of
+  // round-off errors. This parameter has been set to 10−3.
+  // mp is epsilon/C
+  // 10^3 * eps is very conservative, so normally no residual replacements will take place.
+  // It only happens if things go very wrong. Too many restarts may ruin the convergence.
+  const RealScalar mp = RealScalar(1e3) * NumTraits<Scalar>::epsilon();
 
+  // Compute initial residual
+  const RealScalar tolb = tol * normb;  // Relative tolerance
+  VectorType r = b - A * x;
 
+  VectorType x_s, r_s;
 
-			//Compute initial residual
-			const RealScalar tolb = tol * normb; //Relative tolerance
-			VectorType r = b - A * x;
+  if (smoothing) {
+    x_s = x;
+    r_s = r;
+  }
 
-			VectorType x_s, r_s;
+  RealScalar normr = r.stableNorm();
 
-			if (smoothing)
-			{
-				x_s = x;
-				r_s = r;
-			}
+  if (normr <= tolb) {
+    // Initial guess is a good enough solution
+    iter = 0;
+    relres = normr / normb;
+    return true;
+  }
 
-			RealScalar normr = r.norm();
+  DenseMatrixType G = DenseMatrixType::Zero(N, S);
+  DenseMatrixType U = DenseMatrixType::Zero(N, S);
+  DenseMatrixType M = DenseMatrixType::Identity(S, S);
+  VectorType t(N), v(N);
+  Scalar om = 1.;
 
-			if (normr <= tolb)
-			{
-				//Initial guess is a good enough solution
-				iter = 0;
-				relres = normr / normb;
-				return true;
-			}
+  // Main iteration loop, build G-spaces:
+  iter = 0;
 
-			DenseMatrixType G = DenseMatrixType::Zero(N, S);
-			DenseMatrixType U = DenseMatrixType::Zero(N, S);
-			DenseMatrixType M = DenseMatrixType::Identity(S, S);
-			VectorType t(N), v(N);
-			Scalar om = 1.;
+  while (normr > tolb && iter < maxit) {
+    // New right hand size for small system:
+    VectorType f = (r.adjoint() * P).adjoint();
 
-			//Main iteration loop, guild G-spaces:
-			iter = 0;
+    for (Index k = 0; k < S; ++k) {
+      // Solve small system and make v orthogonal to P:
+      // c = M(k:s,k:s)\f(k:s);
+      lu_solver.compute(M.block(k, k, S - k, S - k));
+      VectorType c = lu_solver.solve(f.segment(k, S - k));
+      // v = r - G(:,k:s)*c;
+      v = r - G.rightCols(S - k) * c;
+      // Preconditioning
+      v = precond.solve(v);
 
-			while (normr > tolb && iter < maxit)
-			{
-				//New right hand size for small system:
-				VectorType f = (r.adjoint() * P).adjoint();
+      // Compute new U(:,k) and G(:,k), G(:,k) is in space G_j
+      U.col(k) = U.rightCols(S - k) * c + om * v;
+      G.col(k) = A * U.col(k);
 
-				for (Index k = 0; k < S; ++k)
-				{
-					//Solve small system and make v orthogonal to P:
-					//c = M(k:s,k:s)\f(k:s);
-					lu_solver.compute(M.block(k , k , S -k, S - k ));
-					VectorType c = lu_solver.solve(f.segment(k , S - k ));
-					//v = r - G(:,k:s)*c;
-					v = r - G.rightCols(S - k ) * c;
-					//Preconditioning
-					v = precond.solve(v);
+      // Bi-Orthogonalise the new basis vectors:
+      for (Index i = 0; i < k - 1; ++i) {
+        // alpha =  ( P(:,i)'*G(:,k) )/M(i,i);
+        Scalar alpha = P.col(i).dot(G.col(k)) / M(i, i);
+        G.col(k) = G.col(k) - alpha * G.col(i);
+        U.col(k) = U.col(k) - alpha * U.col(i);
+      }
 
-					//Compute new U(:,k) and G(:,k), G(:,k) is in space G_j
-					U.col(k) = U.rightCols(S - k ) * c + om * v;
-					G.col(k) = A * U.col(k );
+      // New column of M = P'*G  (first k-1 entries are zero)
+      // M(k:s,k) = (G(:,k)'*P(:,k:s))';
+      M.block(k, k, S - k, 1) = (G.col(k).adjoint() * P.rightCols(S - k)).adjoint();
 
-					//Bi-Orthogonalise the new basis vectors:
-					for (Index i = 0; i < k-1 ; ++i)
-					{
-						//alpha =  ( P(:,i)'*G(:,k) )/M(i,i);
-						Scalar alpha = P.col(i ).dot(G.col(k )) / M(i, i );
-						G.col(k ) = G.col(k ) - alpha * G.col(i );
-						U.col(k ) = U.col(k ) - alpha * U.col(i );
-					}
+      if (internal::isApprox(M(k, k), Scalar(0))) {
+        return false;
+      }
 
-					//New column of M = P'*G  (first k-1 entries are zero)
-					//M(k:s,k) = (G(:,k)'*P(:,k:s))';
-					M.block(k , k , S - k , 1) = (G.col(k ).adjoint() * P.rightCols(S - k )).adjoint();
+      // Make r orthogonal to q_i, i = 0..k-1
+      Scalar beta = f(k) / M(k, k);
+      r = r - beta * G.col(k);
+      x = x + beta * U.col(k);
+      normr = r.stableNorm();
 
-					if (internal::isApprox(M(k,k), Scalar(0)))
-					{
-						return false;
-					}
+      if (replacement && normr > tolb / mp) {
+        trueres = true;
+      }
 
-					//Make r orthogonal to q_i, i = 0..k-1
-					Scalar beta = f(k ) / M(k , k );
-					r = r - beta * G.col(k );
-					x = x + beta * U.col(k );
-					normr = r.norm();
+      // Smoothing:
+      if (smoothing) {
+        t = r_s - r;
+        // gamma is a Scalar, but the conversion is not allowed
+        Scalar gamma = t.dot(r_s) / t.stableNorm();
+        r_s = r_s - gamma * t;
+        x_s = x_s - gamma * (x_s - x);
+        normr = r_s.stableNorm();
+      }
 
-					if (replacement && normr > tolb / mp)
-					{
-						trueres = true;
-					}
+      if (normr < tolb || iter == maxit) {
+        break;
+      }
 
-					//Smoothing:
-					if (smoothing)
-					{
-						t = r_s - r;
-						//gamma is a Scalar, but the conversion is not allowed
-						Scalar gamma = t.dot(r_s) / t.norm();
-						r_s = r_s - gamma * t;
-						x_s = x_s - gamma * (x_s - x);
-						normr = r_s.norm();
-					}
+      // New f = P'*r (first k  components are zero)
+      if (k < S - 1) {
+        f.segment(k + 1, S - (k + 1)) = f.segment(k + 1, S - (k + 1)) - beta * M.block(k + 1, k, S - (k + 1), 1);
+      }
+    }  // end for
 
-					if (normr < tolb || iter == maxit)
-					{
-						break;
-					}
+    if (normr < tolb || iter == maxit) {
+      break;
+    }
 
-					//New f = P'*r (first k  components are zero)
-					if (k < S-1)
-					{
-						f.segment(k + 1, S - (k + 1) ) = f.segment(k + 1 , S - (k + 1)) - beta * M.block(k + 1 , k , S - (k + 1), 1);
-					}
-				}//end for
+    // Now we have sufficient vectors in G_j to compute residual in G_j+1
+    // Note: r is already perpendicular to P so v = r
+    // Preconditioning
+    v = r;
+    v = precond.solve(v);
 
-				if (normr < tolb || iter == maxit)
-				{
-					break;
-				}
+    // Matrix-vector multiplication:
+    t = A * v;
 
-				//Now we have sufficient vectors in G_j to compute residual in G_j+1
-				//Note: r is already perpendicular to P so v = r
-				//Preconditioning
-				v = r;
-				v = precond.solve(v);
+    // Computation of a new omega
+    om = internal::omega(t, r, angle);
 
-				//Matrix-vector multiplication:
-				t = A * v;
+    if (om == RealScalar(0.0)) {
+      return false;
+    }
 
-				//Computation of a new omega
-				om = internal::omega(t, r, angle);
+    r = r - om * t;
+    x = x + om * v;
+    normr = r.stableNorm();
 
-				if (om == RealScalar(0.0))
-				{
-					return false;
-				}
+    if (replacement && normr > tolb / mp) {
+      trueres = true;
+    }
 
-				r = r - om * t;
-				x = x + om * v;
-				normr = r.norm();
+    // Residual replacement?
+    if (trueres && normr < normb) {
+      r = b - A * x;
+      trueres = false;
+      replacements++;
+    }
 
-				if (replacement && normr > tolb / mp)
-				{
-					trueres = true;
-				}
+    // Smoothing:
+    if (smoothing) {
+      t = r_s - r;
+      Scalar gamma = t.dot(r_s) / t.stableNorm();
+      r_s = r_s - gamma * t;
+      x_s = x_s - gamma * (x_s - x);
+      normr = r_s.stableNorm();
+    }
 
-				//Residual replacement?
-				if (trueres && normr < normb)
-				{
-					r = b - A * x;
-					trueres = false;
-					replacements++;
-				}
+    iter++;
 
-				//Smoothing:
-				if (smoothing)
-				{
-					t = r_s - r;
-					Scalar gamma = t.dot(r_s) /t.norm();
-					r_s = r_s - gamma * t;
-					x_s = x_s - gamma * (x_s - x);
-					normr = r_s.norm();
-				}
+  }  // end while
 
-				iter++;
+  if (smoothing) {
+    x = x_s;
+  }
+  relres = normr / normb;
+  return true;
+}
 
-			}//end while
+}  // namespace internal
 
-			if (smoothing)
-			{
-				x = x_s;
-			}
-			relres=normr/normb;
-			return true;
-		}
+template <typename MatrixType_, typename Preconditioner_ = DiagonalPreconditioner<typename MatrixType_::Scalar> >
+class IDRS;
 
-	}  // namespace internal
+namespace internal {
 
-	template <typename MatrixType_, typename Preconditioner_ = DiagonalPreconditioner<typename MatrixType_::Scalar> >
-	class IDRS;
+template <typename MatrixType_, typename Preconditioner_>
+struct traits<Eigen::IDRS<MatrixType_, Preconditioner_> > {
+  typedef MatrixType_ MatrixType;
+  typedef Preconditioner_ Preconditioner;
+};
 
-	namespace internal
-	{
-
-		template <typename MatrixType_, typename Preconditioner_>
-		struct traits<Eigen::IDRS<MatrixType_, Preconditioner_> >
-		{
-			typedef MatrixType_ MatrixType;
-			typedef Preconditioner_ Preconditioner;
-		};
-
-	}  // namespace internal
-
+}  // namespace internal
 
 /** \ingroup IterativeLinearSolvers_Module
-  * \brief The Induced Dimension Reduction method (IDR(s)) is a short-recurrences Krylov method for sparse square problems.
-  *
-  * This class allows to solve for A.x = b sparse linear problems. The vectors x and b can be either dense or sparse.
-  * he Induced Dimension Reduction method, IDR(), is a robust and efficient short-recurrence Krylov subspace method for
-  * solving large nonsymmetric systems of linear equations.
-  *
-  * For indefinite systems IDR(S) outperforms both BiCGStab and BiCGStab(L). Additionally, IDR(S) can handle matrices
-  * with complex eigenvalues more efficiently than BiCGStab.
-  *
-  * Many problems that do not converge for BiCGSTAB converge for IDR(s) (for larger values of s). And if both methods 
-  * converge the convergence for IDR(s) is typically much faster for difficult systems (for example indefinite problems). 
-  *
-  * IDR(s) is a limited memory finite termination method. In exact arithmetic it converges in at most N+N/s iterations,
-  * with N the system size.  It uses a fixed number of 4+3s vector. In comparison, BiCGSTAB terminates in 2N iterations 
-  * and uses 7 vectors. GMRES terminates in at most N iterations, and uses I+3 vectors, with I the number of iterations. 
-  * Restarting GMRES limits the memory consumption, but destroys the finite termination property.
-  *
-  * \tparam MatrixType_ the type of the sparse matrix A, can be a dense or a sparse matrix.
-  * \tparam Preconditioner_ the type of the preconditioner. Default is DiagonalPreconditioner
-  *
-  * \implsparsesolverconcept
-  *
-  * The maximal number of iterations and tolerance value can be controlled via the setMaxIterations()
-  * and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations
-  * and NumTraits<Scalar>::epsilon() for the tolerance.
-  *
-  * The tolerance corresponds to the relative residual error: |Ax-b|/|b|
-  *
-  * \b Performance: when using sparse matrices, best performance is achied for a row-major sparse matrix format.
-  * Moreover, in this case multi-threading can be exploited if the user code is compiled with OpenMP enabled.
-  * See \ref TopicMultiThreading for details.
-  *
-  * By default the iterations start with x=0 as an initial guess of the solution.
-  * One can control the start using the solveWithGuess() method.
-  *
-  * IDR(s) can also be used in a matrix-free context, see the following \link MatrixfreeSolverExample example \endlink.
-  *
-  * \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
+ * \brief The Induced Dimension Reduction method (IDR(s)) is a short-recurrences Krylov method for sparse square
+ * problems.
+ *
+ * This class allows to solve for A.x = b sparse linear problems. The vectors x and b can be either dense or sparse.
+ * The Induced Dimension Reduction method, IDR(), is a robust and efficient short-recurrence Krylov subspace method for
+ * solving large nonsymmetric systems of linear equations.
+ *
+ * For indefinite systems IDR(S) outperforms both BiCGStab and BiCGStab(L). Additionally, IDR(S) can handle matrices
+ * with complex eigenvalues more efficiently than BiCGStab.
+ *
+ * Many problems that do not converge for BiCGSTAB converge for IDR(s) (for larger values of s). And if both methods
+ * converge the convergence for IDR(s) is typically much faster for difficult systems (for example indefinite problems).
+ *
+ * IDR(s) is a limited memory finite termination method. In exact arithmetic it converges in at most N+N/s iterations,
+ * with N the system size.  It uses a fixed number of 4+3s vector. In comparison, BiCGSTAB terminates in 2N iterations
+ * and uses 7 vectors. GMRES terminates in at most N iterations, and uses I+3 vectors, with I the number of iterations.
+ * Restarting GMRES limits the memory consumption, but destroys the finite termination property.
+ *
+ * \tparam MatrixType_ the type of the sparse matrix A, can be a dense or a sparse matrix.
+ * \tparam Preconditioner_ the type of the preconditioner. Default is DiagonalPreconditioner
+ *
+ * \implsparsesolverconcept
+ *
+ * The maximal number of iterations and tolerance value can be controlled via the setMaxIterations()
+ * and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations
+ * and NumTraits<Scalar>::epsilon() for the tolerance.
+ *
+ * The tolerance corresponds to the relative residual error: |Ax-b|/|b|
+ *
+ * \b Performance: when using sparse matrices, best performance is achieved for a row-major sparse matrix format.
+ * Moreover, in this case multi-threading can be exploited if the user code is compiled with OpenMP enabled.
+ * See \ref TopicMultiThreading for details.
+ *
+ * By default the iterations start with x=0 as an initial guess of the solution.
+ * One can control the start using the solveWithGuess() method.
+ *
+ * IDR(s) can also be used in a matrix-free context, see the following \link MatrixfreeSolverExample example \endlink.
+ *
+ * \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
+ */
+template <typename MatrixType_, typename Preconditioner_>
+class IDRS : public IterativeSolverBase<IDRS<MatrixType_, Preconditioner_> > {
+ public:
+  typedef MatrixType_ MatrixType;
+  typedef typename MatrixType::Scalar Scalar;
+  typedef typename MatrixType::RealScalar RealScalar;
+  typedef Preconditioner_ Preconditioner;
+
+ private:
+  typedef IterativeSolverBase<IDRS> Base;
+  using Base::m_error;
+  using Base::m_info;
+  using Base::m_isInitialized;
+  using Base::m_iterations;
+  using Base::matrix;
+  Index m_S;
+  bool m_smoothing;
+  RealScalar m_angle;
+  bool m_residual;
+
+ public:
+  /** Default constructor. */
+  IDRS() : m_S(4), m_smoothing(false), m_angle(RealScalar(0.7)), m_residual(false) {}
+
+  /**     Initialize the solver with matrix \a A for further \c Ax=b solving.
+
+          This constructor is a shortcut for the default constructor followed
+          by a call to compute().
+
+          \warning this class stores a reference to the matrix A as well as some
+          precomputed values that depend on it. Therefore, if \a A is changed
+          this class becomes invalid. Call compute() to update it with the new
+          matrix A, or modify a copy of A.
   */
-	template <typename MatrixType_, typename Preconditioner_>
-	class IDRS : public IterativeSolverBase<IDRS<MatrixType_, Preconditioner_> >
-	{
+  template <typename MatrixDerived>
+  explicit IDRS(const EigenBase<MatrixDerived>& A)
+      : Base(A.derived()), m_S(4), m_smoothing(false), m_angle(RealScalar(0.7)), m_residual(false) {}
 
-		public:
-			typedef MatrixType_ MatrixType;
-			typedef typename MatrixType::Scalar Scalar;
-			typedef typename MatrixType::RealScalar RealScalar;
-			typedef Preconditioner_ Preconditioner;
+  /** \internal */
+  /**     Loops over the number of columns of b and does the following:
+                  1. sets the tolerance and maxIterations
+                  2. Calls the function that has the core solver routine
+  */
+  template <typename Rhs, typename Dest>
+  void _solve_vector_with_guess_impl(const Rhs& b, Dest& x) const {
+    m_iterations = Base::maxIterations();
+    m_error = Base::m_tolerance;
 
-		private:
-			typedef IterativeSolverBase<IDRS> Base;
-			using Base::m_error;
-			using Base::m_info;
-			using Base::m_isInitialized;
-			using Base::m_iterations;
-			using Base::matrix;
-			Index m_S;
-			bool m_smoothing;
-			RealScalar m_angle;
-			bool m_residual;
+    bool ret = internal::idrs(matrix(), b, x, Base::m_preconditioner, m_iterations, m_error, m_S, m_smoothing, m_angle,
+                              m_residual);
 
-		public:
-			/** Default constructor. */
-			IDRS(): m_S(4), m_smoothing(false), m_angle(RealScalar(0.7)), m_residual(false) {}
+    m_info = (!ret) ? NumericalIssue : m_error <= Base::m_tolerance ? Success : NoConvergence;
+  }
 
-			/**     Initialize the solver with matrix \a A for further \c Ax=b solving.
+  /** Sets the parameter S, indicating the dimension of the shadow space. Default is 4*/
+  void setS(Index S) {
+    if (S < 1) {
+      S = 4;
+    }
 
-			        This constructor is a shortcut for the default constructor followed
-			        by a call to compute().
+    m_S = S;
+  }
 
-			        \warning this class stores a reference to the matrix A as well as some
-			        precomputed values that depend on it. Therefore, if \a A is changed
-			        this class becomes invalid. Call compute() to update it with the new
-			        matrix A, or modify a copy of A.
-			*/
-			template <typename MatrixDerived>
-			explicit IDRS(const EigenBase<MatrixDerived>& A) : Base(A.derived()), m_S(4), m_smoothing(false),
-															   m_angle(RealScalar(0.7)), m_residual(false) {}
+  /** Switches off and on smoothing.
+  Residual smoothing results in monotonically decreasing residual norms at
+  the expense of two extra vectors of storage and a few extra vector
+  operations. Although monotonic decrease of the residual norms is a
+  desirable property, the rate of convergence of the unsmoothed process and
+  the smoothed process is basically the same. Default is off */
+  void setSmoothing(bool smoothing) { m_smoothing = smoothing; }
 
+  /** The angle must be a real scalar. In IDR(s), a value for the
+  iteration parameter omega must be chosen in every s+1th step. The most
+  natural choice is to select a value to minimize the norm of the next residual.
+  This corresponds to the parameter omega = 0. In practice, this may lead to
+  values of omega that are so small that the other iteration parameters
+  cannot be computed with sufficient accuracy. In such cases it is better to
+  increase the value of omega sufficiently such that a compromise is reached
+  between accurate computations and reduction of the residual norm. The
+  parameter angle =0.7 (”maintaining the convergence strategy”)
+  results in such a compromise. */
+  void setAngle(RealScalar angle) { m_angle = angle; }
 
-			/** \internal */
-			/**     Loops over the number of columns of b and does the following:
-			                1. sets the tolerance and maxIterations
-			                2. Calls the function that has the core solver routine
-			*/
-			template <typename Rhs, typename Dest>
-			void _solve_vector_with_guess_impl(const Rhs& b, Dest& x) const
-			{
-				m_iterations = Base::maxIterations();
-				m_error = Base::m_tolerance;
-
-				bool ret = internal::idrs(matrix(), b, x, Base::m_preconditioner, m_iterations, m_error, m_S,m_smoothing,m_angle,m_residual);
-
-				m_info = (!ret) ? NumericalIssue : m_error <= Base::m_tolerance ? Success : NoConvergence;
-			}
-
-			/** Sets the parameter S, indicating the dimension of the shadow space. Default is 4*/
-			void setS(Index S)
-			{
-				if (S < 1)
-				{
-					S = 4;
-				}
-
-				m_S = S;
-			}
-
-			/** Switches off and on smoothing.
-			Residual smoothing results in monotonically decreasing residual norms at
-			the expense of two extra vectors of storage and a few extra vector
-			operations. Although monotonic decrease of the residual norms is a
-			desirable property, the rate of convergence of the unsmoothed process and
-			the smoothed process is basically the same. Default is off */
-			void setSmoothing(bool smoothing)
-			{
-				m_smoothing=smoothing;
-			}
-
-			/** The angle must be a real scalar. In IDR(s), a value for the
-			iteration parameter omega must be chosen in every s+1th step. The most
-			natural choice is to select a value to minimize the norm of the next residual.
-			This corresponds to the parameter omega = 0. In practice, this may lead to
-			values of omega that are so small that the other iteration parameters
-			cannot be computed with sufficient accuracy. In such cases it is better to
-			increase the value of omega sufficiently such that a compromise is reached
-			between accurate computations and reduction of the residual norm. The
-			parameter angle =0.7 (”maintaining the convergence strategy”)
-			results in such a compromise. */
-			void setAngle(RealScalar angle)
-			{
-				m_angle=angle;
-			}
-
-			/** The parameter replace is a logical that determines whether a
-			residual replacement strategy is employed to increase the accuracy of the
-			solution. */
-			void setResidualUpdate(bool update)
-			{
-				m_residual=update;
-			}
-
-	};
+  /** The parameter replace is a logical that determines whether a
+  residual replacement strategy is employed to increase the accuracy of the
+  solution. */
+  void setResidualUpdate(bool update) { m_residual = update; }
+};
 
 }  // namespace Eigen
 
diff --git a/unsupported/Eigen/src/IterativeSolvers/IDRSTABL.h b/unsupported/Eigen/src/IterativeSolvers/IDRSTABL.h
new file mode 100755
index 0000000..712c171
--- /dev/null
+++ b/unsupported/Eigen/src/IterativeSolvers/IDRSTABL.h
@@ -0,0 +1,476 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2020 Chris Schoutrop <c.e.m.schoutrop@tue.nl>
+// Copyright (C) 2020 Mischa Senders <m.j.senders@student.tue.nl>
+// Copyright (C) 2020 Lex Kuijpers <l.kuijpers@student.tue.nl>
+// Copyright (C) 2020 Jens Wehner <j.wehner@esciencecenter.nl>
+// Copyright (C) 2020 Jan van Dijk <j.v.dijk@tue.nl>
+// Copyright (C) 2020 Adithya Vijaykumar
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+/*
+
+The IDR(S)Stab(L) method is a combination of IDR(S) and BiCGStab(L)
+
+This implementation of IDRSTABL is based on
+1. Aihara, K., Abe, K., & Ishiwata, E. (2014). A variant of IDRstab with
+reliable update strategies for solving sparse linear systems. Journal of
+Computational and Applied Mathematics, 259, 244-258.
+   doi:10.1016/j.cam.2013.08.028
+                2. Aihara, K., Abe, K., & Ishiwata, E. (2015). Preconditioned
+IDRSTABL Algorithms for Solving Nonsymmetric Linear Systems. International
+Journal of Applied Mathematics, 45(3).
+                3. Saad, Y. (2003). Iterative Methods for Sparse Linear Systems:
+Second Edition. Philadelphia, PA: SIAM.
+                4. Sonneveld, P., & Van Gijzen, M. B. (2009). IDR(s): A Family
+of Simple and Fast Algorithms for Solving Large Nonsymmetric Systems of Linear
+Equations. SIAM Journal on Scientific Computing, 31(2), 1035-1062.
+   doi:10.1137/070685804
+                5. Sonneveld, P. (2012). On the convergence behavior of IDR (s)
+and related methods. SIAM Journal on Scientific Computing, 34(5), A2576-A2598.
+
+    Right-preconditioning based on Ref. 3 is implemented here.
+*/
+
+#ifndef EIGEN_IDRSTABL_H
+#define EIGEN_IDRSTABL_H
+
+namespace Eigen {
+
+namespace internal {
+
+template <typename MatrixType, typename Rhs, typename Dest, typename Preconditioner>
+bool idrstabl(const MatrixType &mat, const Rhs &rhs, Dest &x, const Preconditioner &precond, Index &iters,
+              typename Dest::RealScalar &tol_error, Index L, Index S) {
+  /*
+    Setup and type definitions.
+  */
+  using numext::abs;
+  using numext::sqrt;
+  typedef typename Dest::Scalar Scalar;
+  typedef typename Dest::RealScalar RealScalar;
+  typedef Matrix<Scalar, Dynamic, 1> VectorType;
+  typedef Matrix<Scalar, Dynamic, Dynamic, ColMajor> DenseMatrixType;
+
+  const Index N = x.rows();
+
+  Index k = 0;  // Iteration counter
+  const Index maxIters = iters;
+
+  const RealScalar rhs_norm = rhs.stableNorm();
+  const RealScalar tol = tol_error * rhs_norm;
+
+  if (rhs_norm == 0) {
+    /*
+      If b==0, then the exact solution is x=0.
+      rhs_norm is needed for other calculations anyways, this exit is a freebie.
+    */
+    x.setZero();
+    tol_error = 0.0;
+    return true;
+  }
+  // Construct decomposition objects beforehand.
+  FullPivLU<DenseMatrixType> lu_solver;
+
+  if (S >= N || L >= N) {
+    /*
+      The matrix is very small, or the choice of L and S is very poor
+      in that case solving directly will be best.
+    */
+    lu_solver.compute(DenseMatrixType(mat));
+    x = lu_solver.solve(rhs);
+    tol_error = (rhs - mat * x).stableNorm() / rhs_norm;
+    return true;
+  }
+
+  // Define maximum sizes to prevent any reallocation later on.
+  DenseMatrixType u(N, L + 1);
+  DenseMatrixType r(N, L + 1);
+
+  DenseMatrixType V(N * (L + 1), S);
+
+  VectorType alpha(S);
+  VectorType gamma(L);
+  VectorType update(N);
+
+  /*
+    Main IDRSTABL algorithm
+  */
+  // Set up the initial residual
+  VectorType x0 = x;
+  r.col(0) = rhs - mat * x;
+  x.setZero();  // The final solution will be x0+x
+
+  tol_error = r.col(0).stableNorm();
+
+  // FOM = Full orthogonalisation method
+  DenseMatrixType h_FOM = DenseMatrixType::Zero(S, S - 1);
+
+  // Construct an initial U matrix of size N x S
+  DenseMatrixType U(N * (L + 1), S);
+  for (Index col_index = 0; col_index < S; ++col_index) {
+    // Arnoldi-like process to generate a set of orthogonal vectors spanning
+    // {u,A*u,A*A*u,...,A^(S-1)*u}. This construction can be combined with the
+    // Full Orthogonalization Method (FOM) from Ref.3 to provide a possible
+    // early exit with no additional MV.
+    if (col_index != 0) {
+      /*
+      Modified Gram-Schmidt strategy:
+      */
+      VectorType w = mat * precond.solve(u.col(0));
+      for (Index i = 0; i < col_index; ++i) {
+        auto v = U.col(i).head(N);
+        h_FOM(i, col_index - 1) = v.dot(w);
+        w -= h_FOM(i, col_index - 1) * v;
+      }
+      u.col(0) = w;
+      h_FOM(col_index, col_index - 1) = u.col(0).stableNorm();
+
+      if (abs(h_FOM(col_index, col_index - 1)) != RealScalar(0)) {
+        /*
+        This only happens if u is NOT exactly zero. In case it is exactly zero
+        it would imply that this u has no component in the direction of the
+        current residual.
+
+        By then setting u to zero it will not contribute any further (as it
+        should). Whereas attempting to normalize results in division by zero.
+
+        Such cases occur if:
+        1. The basis of dimension <S is sufficient to exactly solve the linear
+        system. I.e. the current residual is in span{r,Ar,...A^{m-1}r}, where
+        (m-1)<=S.
+        2. Two vectors generated from r, Ar,... are (numerically)
+        parallel.
+
+        In case 1, the exact solution to the system can be obtained from the
+        "Full Orthogonalization Method" (Algorithm 6.4 in the book of Saad),
+        without any additional MV.
+
+        Contrary to what one would suspect, the comparison with ==0.0 for
+        floating-point types is intended here. Any arbitrary non-zero u is fine
+        to continue, however if u contains either NaN or Inf the algorithm will
+        break down.
+        */
+        u.col(0) /= h_FOM(col_index, col_index - 1);
+      }
+    } else {
+      u.col(0) = r.col(0);
+      u.col(0).normalize();
+    }
+
+    U.col(col_index).head(N) = u.col(0);
+  }
+
+  if (S > 1) {
+    // Check for early FOM exit.
+    Scalar beta = r.col(0).stableNorm();
+    VectorType e1 = VectorType::Zero(S - 1);
+    e1(0) = beta;
+    lu_solver.compute(h_FOM.topLeftCorner(S - 1, S - 1));
+    VectorType y = lu_solver.solve(e1);
+    VectorType x2 = x + U.topLeftCorner(N, S - 1) * y;
+
+    // Using proposition 6.7 in Saad, one MV can be saved to calculate the
+    // residual
+    RealScalar FOM_residual = (h_FOM(S - 1, S - 2) * y(S - 2) * U.col(S - 1).head(N)).stableNorm();
+
+    if (FOM_residual < tol) {
+      // Exit, the FOM algorithm was already accurate enough
+      iters = k;
+      // Convert back to the unpreconditioned solution
+      x = precond.solve(x2);
+      // x contains the updates to x0, add those back to obtain the solution
+      x += x0;
+      tol_error = FOM_residual / rhs_norm;
+      return true;
+    }
+  }
+
+  /*
+    Select an initial (N x S) matrix R0.
+    1. Generate random R0, orthonormalize the result.
+    2. This results in R0, however to save memory and compute we only need the
+    adjoint of R0. This is given by the matrix R_T.\ Additionally, the matrix
+    (mat.adjoint()*R_tilde).adjoint()=R_tilde.adjoint()*mat by the
+    anti-distributivity property of the adjoint. This results in AR_T, which is
+    constant if R_T does not have to be regenerated and can be precomputed.
+    Based on reference 4, this has zero probability in exact arithmetic.
+  */
+
+  // Original IDRSTABL and Kensuke choose S random vectors:
+  const HouseholderQR<DenseMatrixType> qr(DenseMatrixType::Random(N, S));
+  DenseMatrixType R_T = (qr.householderQ() * DenseMatrixType::Identity(N, S)).adjoint();
+  DenseMatrixType AR_T = DenseMatrixType(R_T * mat);
+
+  // Pre-allocate sigma.
+  DenseMatrixType sigma(S, S);
+
+  bool reset_while = false;  // Should the while loop be reset for some reason?
+
+  while (k < maxIters) {
+    for (Index j = 1; j <= L; ++j) {
+      /*
+        The IDR Step
+      */
+      // Construction of the sigma-matrix, and the decomposition of sigma.
+      for (Index i = 0; i < S; ++i) {
+        sigma.col(i).noalias() = AR_T * precond.solve(U.block(N * (j - 1), i, N, 1));
+      }
+
+      lu_solver.compute(sigma);
+      // Obtain the update coefficients alpha
+      if (j == 1) {
+        // alpha=inverse(sigma)*(R_T*r_0);
+        alpha.noalias() = lu_solver.solve(R_T * r.col(0));
+      } else {
+        // alpha=inverse(sigma)*(AR_T*r_{j-2})
+        alpha.noalias() = lu_solver.solve(AR_T * precond.solve(r.col(j - 2)));
+      }
+
+      // Obtain new solution and residual from this update
+      update.noalias() = U.topRows(N) * alpha;
+      r.col(0) -= mat * precond.solve(update);
+      x += update;
+
+      for (Index i = 1; i <= j - 2; ++i) {
+        // This only affects the case L>2
+        r.col(i) -= U.block(N * (i + 1), 0, N, S) * alpha;
+      }
+      if (j > 1) {
+        // r=[r;A*r_{j-2}]
+        r.col(j - 1).noalias() = mat * precond.solve(r.col(j - 2));
+      }
+      tol_error = r.col(0).stableNorm();
+
+      if (tol_error < tol) {
+        // If at this point the algorithm has converged, exit.
+        reset_while = true;
+        break;
+      }
+
+      bool break_normalization = false;
+      for (Index q = 1; q <= S; ++q) {
+        if (q == 1) {
+          // u = r;
+          u.leftCols(j + 1) = r.leftCols(j + 1);
+        } else {
+          // u=[u_1;u_2;...;u_j]
+          u.leftCols(j) = u.middleCols(1, j);
+        }
+
+        // Obtain the update coefficients beta implicitly
+        // beta=lu_sigma.solve(AR_T * u.block(N * (j - 1), 0, N, 1)
+        u.reshaped().head(u.rows() * j) -= U.topRows(N * j) * lu_solver.solve(AR_T * precond.solve(u.col(j - 1)));
+
+        // u=[u;Au_{j-1}]
+        u.col(j).noalias() = mat * precond.solve(u.col(j - 1));
+
+        // Orthonormalize u_j to the columns of V_j(:,1:q-1)
+        if (q > 1) {
+          /*
+          Modified Gram-Schmidt-like procedure to make u orthogonal to the
+          columns of V from Ref. 1.
+
+          The vector mu from Ref. 1 is obtained implicitly:
+          mu=V.block(N * j, 0, N, q - 1).adjoint() * u.block(N * j, 0, N, 1).
+          */
+          for (Index i = 0; i <= q - 2; ++i) {
+            auto v = V.col(i).segment(N * j, N);
+            Scalar h = v.squaredNorm();
+            h = v.dot(u.col(j)) / h;
+            u.reshaped().head(u.rows() * (j + 1)) -= h * V.block(0, i, N * (j + 1), 1);
+          }
+        }
+        // Normalize u and assign to a column of V
+        Scalar normalization_constant = u.col(j).stableNorm();
+        //  If u is exactly zero, this will lead to a NaN. Small, non-zero u is
+        //  fine.
+        if (normalization_constant == RealScalar(0.0)) {
+          break_normalization = true;
+          break;
+        } else {
+          u.leftCols(j + 1) /= normalization_constant;
+        }
+
+        V.block(0, q - 1, N * (j + 1), 1).noalias() = u.reshaped().head(u.rows() * (j + 1));
+      }
+
+      if (break_normalization == false) {
+        U = V;
+      }
+    }
+    if (reset_while) {
+      break;
+    }
+
+    // r=[r;mat*r_{L-1}]
+    r.col(L).noalias() = mat * precond.solve(r.col(L - 1));
+
+    /*
+            The polynomial step
+    */
+    ColPivHouseholderQR<DenseMatrixType> qr_solver(r.rightCols(L));
+    gamma.noalias() = qr_solver.solve(r.col(0));
+
+    // Update solution and residual using the "minimized residual coefficients"
+    update.noalias() = r.leftCols(L) * gamma;
+    x += update;
+    r.col(0) -= mat * precond.solve(update);
+
+    // Update iteration info
+    ++k;
+    tol_error = r.col(0).stableNorm();
+
+    if (tol_error < tol) {
+      // Slightly early exit by moving the criterion before the update of U,
+      // after the main while loop the result of that calculation would not be
+      // needed.
+      break;
+    }
+
+    /*
+    U=U0-sum(gamma_j*U_j)
+    Consider the first iteration. Then U only contains U0, so at the start of
+    the while-loop U should be U0. Therefore only the first N rows of U have to
+    be updated.
+    */
+    for (Index i = 1; i <= L; ++i) {
+      U.topRows(N) -= U.block(N * i, 0, N, S) * gamma(i - 1);
+    }
+  }
+
+  /*
+          Exit after the while loop terminated.
+  */
+  iters = k;
+  // Convert back to the unpreconditioned solution
+  x = precond.solve(x);
+  // x contains the updates to x0, add those back to obtain the solution
+  x += x0;
+  tol_error = tol_error / rhs_norm;
+  return true;
+}
+
+}  // namespace internal
+
+template <typename MatrixType_, typename Preconditioner_ = DiagonalPreconditioner<typename MatrixType_::Scalar>>
+class IDRSTABL;
+
+namespace internal {
+
+template <typename MatrixType_, typename Preconditioner_>
+struct traits<IDRSTABL<MatrixType_, Preconditioner_>> {
+  typedef MatrixType_ MatrixType;
+  typedef Preconditioner_ Preconditioner;
+};
+
+}  // namespace internal
+
+/** \ingroup IterativeLinearSolvers_Module
+ * \brief The IDR(s)STAB(l) is a combination of IDR(s) and BiCGSTAB(l). It is a
+ * short-recurrences Krylov method for sparse square problems. It can outperform
+ * both IDR(s) and BiCGSTAB(l). IDR(s)STAB(l) generally closely follows the
+ * optimal GMRES convergence in terms of the number of Matrix-Vector products.
+ * However, without the increasing cost per iteration of GMRES. IDR(s)STAB(l) is
+ * suitable for both indefinite systems and systems with complex eigenvalues.
+ *
+ * This class allows solving for A.x = b sparse linear problems. The vectors x
+ * and b can be either dense or sparse.
+ *
+ * \tparam MatrixType_ the type of the sparse matrix A, can be a dense or a
+ * sparse matrix. \tparam Preconditioner_ the type of the preconditioner.
+ * Default is DiagonalPreconditioner
+ *
+ * \implsparsesolverconcept
+ *
+ * The maximum number of iterations and tolerance value can be controlled via
+ * the setMaxIterations() and setTolerance() methods. The defaults are the size
+ * of the problem for the maximum number of iterations and
+ * NumTraits<Scalar>::epsilon() for the tolerance.
+ *
+ * The tolerance is the maximum relative residual error: |Ax-b|/|b| for which
+ * the linear system is considered solved.
+ *
+ * \b Performance: When using sparse matrices, best performance is achieved for
+ * a row-major sparse matrix format. Moreover, in this case multi-threading can
+ * be exploited if the user code is compiled with OpenMP enabled. See \ref
+ * TopicMultiThreading for details.
+ *
+ * By default the iterations start with x=0 as an initial guess of the solution.
+ * One can control the start using the solveWithGuess() method.
+ *
+ * IDR(s)STAB(l) can also be used in a matrix-free context, see the following
+ * \link MatrixfreeSolverExample example \endlink.
+ *
+ * \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
+ */
+
+template <typename MatrixType_, typename Preconditioner_>
+class IDRSTABL : public IterativeSolverBase<IDRSTABL<MatrixType_, Preconditioner_>> {
+  typedef IterativeSolverBase<IDRSTABL> Base;
+  using Base::m_error;
+  using Base::m_info;
+  using Base::m_isInitialized;
+  using Base::m_iterations;
+  using Base::matrix;
+  Index m_L;
+  Index m_S;
+
+ public:
+  typedef MatrixType_ MatrixType;
+  typedef typename MatrixType::Scalar Scalar;
+  typedef typename MatrixType::RealScalar RealScalar;
+  typedef Preconditioner_ Preconditioner;
+
+ public:
+  /** Default constructor. */
+  IDRSTABL() : m_L(2), m_S(4) {}
+
+  /**   Initialize the solver with matrix \a A for further \c Ax=b solving.
+
+  This constructor is a shortcut for the default constructor followed
+  by a call to compute().
+
+  \warning this class stores a reference to the matrix A as well as some
+  precomputed values that depend on it. Therefore, if \a A is changed
+  this class becomes invalid. Call compute() to update it with the new
+  matrix A, or modify a copy of A.
+          */
+  template <typename MatrixDerived>
+  explicit IDRSTABL(const EigenBase<MatrixDerived> &A) : Base(A.derived()), m_L(2), m_S(4) {}
+
+  /** \internal */
+  /**     Loops over the number of columns of b and does the following:
+                                  1. sets the tolerance and maxIterations
+                                  2. Calls the function that has the core solver
+     routine
+  */
+  template <typename Rhs, typename Dest>
+  void _solve_vector_with_guess_impl(const Rhs &b, Dest &x) const {
+    m_iterations = Base::maxIterations();
+    m_error = Base::m_tolerance;
+    bool ret = internal::idrstabl(matrix(), b, x, Base::m_preconditioner, m_iterations, m_error, m_L, m_S);
+
+    m_info = (!ret) ? NumericalIssue : m_error <= 10 * Base::m_tolerance ? Success : NoConvergence;
+  }
+
+  /** Sets the parameter L, indicating the number of minimum-residual steps
+   * used. */
+  void setL(Index L) {
+    eigen_assert(L >= 1 && "L needs to be positive");
+    m_L = L;
+  }
+  /** Sets the parameter S, indicating the dimension of the shadow residual
+   * space. */
+  void setS(Index S) {
+    eigen_assert(S >= 1 && "S needs to be positive");
+    m_S = S;
+  }
+};
+
+}  // namespace Eigen
+
+#endif /* EIGEN_IDRSTABL_H */
diff --git a/unsupported/Eigen/src/IterativeSolvers/MINRES.h b/unsupported/Eigen/src/IterativeSolvers/MINRES.h
index 907e635..70f8ae5 100644
--- a/unsupported/Eigen/src/IterativeSolvers/MINRES.h
+++ b/unsupported/Eigen/src/IterativeSolvers/MINRES.h
@@ -246,7 +246,7 @@
                               &&  (!NumTraits<Scalar>::IsComplex)
             };
             typedef typename internal::conditional<TransposeInput,Transpose<const ActualMatrixType>, ActualMatrixType const&>::type RowMajorWrapper;
-            EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(MatrixWrapper::MatrixFree,UpLo==(Lower|Upper)),MATRIX_FREE_CONJUGATE_GRADIENT_IS_COMPATIBLE_WITH_UPPER_UNION_LOWER_MODE_ONLY);
+            EIGEN_STATIC_ASSERT(internal::check_implication(MatrixWrapper::MatrixFree, UpLo==(Lower|Upper)),MATRIX_FREE_CONJUGATE_GRADIENT_IS_COMPATIBLE_WITH_UPPER_UNION_LOWER_MODE_ONLY);
             typedef typename internal::conditional<UpLo==(Lower|Upper),
                                                   RowMajorWrapper,
                                                   typename MatrixWrapper::template ConstSelfAdjointViewReturnType<UpLo>::Type
diff --git a/unsupported/Eigen/src/Skyline/SkylineMatrixBase.h b/unsupported/Eigen/src/Skyline/SkylineMatrixBase.h
index cc41175..e68d833 100644
--- a/unsupported/Eigen/src/Skyline/SkylineMatrixBase.h
+++ b/unsupported/Eigen/src/Skyline/SkylineMatrixBase.h
@@ -87,8 +87,8 @@
     typedef typename NumTraits<Scalar>::Real RealScalar;
 
     /** type of the equivalent square matrix */
-    typedef Matrix<Scalar, EIGEN_SIZE_MAX(RowsAtCompileTime, ColsAtCompileTime),
-                           EIGEN_SIZE_MAX(RowsAtCompileTime, ColsAtCompileTime) > SquareMatrixType;
+    typedef Matrix<Scalar, internal::max_size_prefer_dynamic(RowsAtCompileTime, ColsAtCompileTime),
+                           internal::max_size_prefer_dynamic(RowsAtCompileTime, ColsAtCompileTime) > SquareMatrixType;
 
     inline const Derived& derived() const {
         return *static_cast<const Derived*> (this);
diff --git a/unsupported/Eigen/src/Skyline/SkylineProduct.h b/unsupported/Eigen/src/Skyline/SkylineProduct.h
index 4b41e10..dab7536 100644
--- a/unsupported/Eigen/src/Skyline/SkylineProduct.h
+++ b/unsupported/Eigen/src/Skyline/SkylineProduct.h
@@ -37,7 +37,7 @@
 
         RowsAtCompileTime = _LhsNested::RowsAtCompileTime,
         ColsAtCompileTime = _RhsNested::ColsAtCompileTime,
-        InnerSize = EIGEN_SIZE_MIN_PREFER_FIXED(_LhsNested::ColsAtCompileTime, _RhsNested::RowsAtCompileTime),
+        InnerSize = internal::min_size_prefer_fixed(_LhsNested::ColsAtCompileTime, _RhsNested::RowsAtCompileTime),
 
         MaxRowsAtCompileTime = _LhsNested::MaxRowsAtCompileTime,
         MaxColsAtCompileTime = _RhsNested::MaxColsAtCompileTime,
diff --git a/unsupported/test/CMakeLists.txt b/unsupported/test/CMakeLists.txt
index f87bacd..0f05c28 100644
--- a/unsupported/test/CMakeLists.txt
+++ b/unsupported/test/CMakeLists.txt
@@ -99,6 +99,8 @@
 ei_add_test(dgmres)
 ei_add_test(minres)
 ei_add_test(idrs)
+ei_add_test(bicgstabl)
+ei_add_test(idrstabl)
 ei_add_test(levenberg_marquardt)
 ei_add_test(kronecker_product)
 ei_add_test(bessel_functions)
diff --git a/unsupported/test/autodiff.cpp b/unsupported/test/autodiff.cpp
index fded7b8..0addf2c 100644
--- a/unsupported/test/autodiff.cpp
+++ b/unsupported/test/autodiff.cpp
@@ -106,7 +106,6 @@
 };
 
 
-#if EIGEN_HAS_VARIADIC_TEMPLATES
 /* Test functor for the C++11 features. */
 template <typename Scalar>
 struct integratorFunctor
@@ -186,7 +185,6 @@
     VERIFY_IS_APPROX(y, yref);
     VERIFY_IS_APPROX(j, jref);
 }
-#endif
 
 template<typename Func> void forward_jacobian(const Func& f)
 {
@@ -247,9 +245,7 @@
   CALL_SUBTEST(( forward_jacobian(TestFunc1<double,3,2>()) ));
   CALL_SUBTEST(( forward_jacobian(TestFunc1<double,3,3>()) ));
   CALL_SUBTEST(( forward_jacobian(TestFunc1<double>(3,3)) ));
-#if EIGEN_HAS_VARIADIC_TEMPLATES
   CALL_SUBTEST(( forward_jacobian_cpp11(integratorFunctor<double>(10)) ));
-#endif
 }
 
 
diff --git a/unsupported/test/bicgstabl.cpp b/unsupported/test/bicgstabl.cpp
new file mode 100644
index 0000000..302848c
--- /dev/null
+++ b/unsupported/test/bicgstabl.cpp
@@ -0,0 +1,31 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2011 Gael Guennebaud <g.gael@free.fr>
+// Copyright (C) 2012 Kolja Brix <brix@igpm.rwth-aaachen.de>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "../../test/sparse_solver.h"
+#include <Eigen/IterativeSolvers>
+
+template<typename T> void test_bicgstabl_T()
+{
+  BiCGSTABL<SparseMatrix<T>, DiagonalPreconditioner<T> > bicgstabl_colmajor_diag;
+  BiCGSTABL<SparseMatrix<T>, IncompleteLUT<T> >           bicgstabl_colmajor_ilut;
+
+  //This does not change the tolerance of the test, only the tolerance of the solver.
+  bicgstabl_colmajor_diag.setTolerance(NumTraits<T>::epsilon()*20);
+  bicgstabl_colmajor_ilut.setTolerance(NumTraits<T>::epsilon()*20);
+
+  CALL_SUBTEST( check_sparse_square_solving(bicgstabl_colmajor_diag)  );
+  CALL_SUBTEST( check_sparse_square_solving(bicgstabl_colmajor_ilut)     );
+}
+
+EIGEN_DECLARE_TEST(bicgstabl)
+{
+  CALL_SUBTEST_1(test_bicgstabl_T<double>());
+  CALL_SUBTEST_2(test_bicgstabl_T<std::complex<double> >());
+}
diff --git a/unsupported/test/cxx11_tensor_assign.cpp b/unsupported/test/cxx11_tensor_assign.cpp
index 8e3ca0f..015865e 100644
--- a/unsupported/test/cxx11_tensor_assign.cpp
+++ b/unsupported/test/cxx11_tensor_assign.cpp
@@ -280,7 +280,6 @@
 }
 
 static void test_std_initializers_tensor() {
-#if EIGEN_HAS_VARIADIC_TEMPLATES
   Tensor<int, 1> a(3);
   a.setValues({0, 1, 2});
   VERIFY_IS_EQUAL(a(0), 0);
@@ -349,7 +348,6 @@
   VERIFY_IS_EQUAL(c(2, 1, 1), 25);
   VERIFY_IS_EQUAL(c(2, 1, 2), 26);
   VERIFY_IS_EQUAL(c(2, 1, 3), 27);
-#endif  // EIGEN_HAS_VARIADIC_TEMPLATES
 }
 
 EIGEN_DECLARE_TEST(cxx11_tensor_assign)
diff --git a/unsupported/test/cxx11_tensor_broadcasting.cpp b/unsupported/test/cxx11_tensor_broadcasting.cpp
index cbd92c3..1523657 100644
--- a/unsupported/test/cxx11_tensor_broadcasting.cpp
+++ b/unsupported/test/cxx11_tensor_broadcasting.cpp
@@ -91,15 +91,7 @@
     }
   }
 
-#if EIGEN_HAS_VARIADIC_TEMPLATES
   tensor.resize(11,3,5);
-#else
-  array<Index, 3> new_dims;
-  new_dims[0] = 11;
-  new_dims[1] = 3;
-  new_dims[2] = 5;
-  tensor.resize(new_dims);
-#endif
 
   tensor.setRandom();
   broadcast = tensor.broadcast(broadcasts);
@@ -148,15 +140,7 @@
     }
   }
 
-#if EIGEN_HAS_VARIADIC_TEMPLATES
   tensor.resize(11,3,5);
-#else
-  array<Index, 3> new_dims;
-  new_dims[0] = 11;
-  new_dims[1] = 3;
-  new_dims[2] = 5;
-  tensor.resize(new_dims);
-#endif
 
   tensor.setRandom();
   broadcast = tensor.broadcast(broadcasts);
diff --git a/unsupported/test/cxx11_tensor_custom_index.cpp b/unsupported/test/cxx11_tensor_custom_index.cpp
index b5dbc97..38ce05b 100644
--- a/unsupported/test/cxx11_tensor_custom_index.cpp
+++ b/unsupported/test/cxx11_tensor_custom_index.cpp
@@ -20,7 +20,6 @@
 template <int DataLayout>
 static void test_map_as_index()
 {
-#ifdef EIGEN_HAS_SFINAE
   Tensor<float, 4, DataLayout> tensor(2, 3, 5, 7);
   tensor.setRandom();
 
@@ -35,14 +34,12 @@
 
   VERIFY_IS_EQUAL(tensor.coeff(coeffC), tensor.coeff(coeff));
   VERIFY_IS_EQUAL(tensor.coeffRef(coeffC), tensor.coeffRef(coeff));
-#endif
 }
 
 
 template <int DataLayout>
 static void test_matrix_as_index()
 {
-#ifdef EIGEN_HAS_SFINAE
   Tensor<float, 4, DataLayout> tensor(2, 3, 5, 7);
   tensor.setRandom();
 
@@ -53,14 +50,12 @@
 
   VERIFY_IS_EQUAL(tensor.coeff(coeffC), tensor.coeff(coeff));
   VERIFY_IS_EQUAL(tensor.coeffRef(coeffC), tensor.coeffRef(coeff));
-#endif
 }
 
 
 template <int DataLayout>
 static void test_varlist_as_index()
 {
-#ifdef EIGEN_HAS_SFINAE
   Tensor<float, 4, DataLayout> tensor(2, 3, 5, 7);
   tensor.setRandom();
 
@@ -68,14 +63,12 @@
 
   VERIFY_IS_EQUAL(tensor.coeff({1,2,4,1}), tensor.coeff(coeff));
   VERIFY_IS_EQUAL(tensor.coeffRef({1,2,4,1}), tensor.coeffRef(coeff));
-#endif
 }
 
 
 template <int DataLayout>
 static void test_sizes_as_index()
 {
-#ifdef EIGEN_HAS_SFINAE
   Tensor<float, 4, DataLayout> tensor(2, 3, 5, 7);
   tensor.setRandom();
 
@@ -84,7 +77,6 @@
 
   VERIFY_IS_EQUAL(tensor.coeff(coeffC), tensor.coeff(coeff));
   VERIFY_IS_EQUAL(tensor.coeffRef(coeffC), tensor.coeffRef(coeff));
-#endif
 }
 
 
diff --git a/unsupported/test/cxx11_tensor_gpu.cu b/unsupported/test/cxx11_tensor_gpu.cu
index 33cbac4..7b3fb5a 100644
--- a/unsupported/test/cxx11_tensor_gpu.cu
+++ b/unsupported/test/cxx11_tensor_gpu.cu
@@ -17,8 +17,6 @@
 
 #include <unsupported/Eigen/CXX11/src/Tensor/TensorGpuHipCudaDefines.h>
 
-#define EIGEN_GPU_TEST_C99_MATH  1
-
 using Eigen::Tensor;
 
 void test_gpu_nullary() {
@@ -660,7 +658,6 @@
 }
 
 
-#if EIGEN_GPU_TEST_C99_MATH
 template <typename Scalar>
 void test_gpu_lgamma(const Scalar stddev)
 {
@@ -699,7 +696,6 @@
   gpuFree(d_in);
   gpuFree(d_out);
 }
-#endif
 
 template <typename Scalar>
 void test_gpu_digamma()
@@ -1023,7 +1019,6 @@
   gpuFree(d_out);
 }
 
-#if EIGEN_GPU_TEST_C99_MATH
 template <typename Scalar>
 void test_gpu_erf(const Scalar stddev)
 {
@@ -1101,7 +1096,7 @@
   gpuFree(d_in);
   gpuFree(d_out);
 }
-#endif
+
 template <typename Scalar>
 void test_gpu_ndtri()
 {
@@ -1588,7 +1583,6 @@
   CALL_SUBTEST_3(test_gpu_convolution_3d<RowMajor>());
 #endif
 
-#if EIGEN_GPU_TEST_C99_MATH
   // std::erf, std::erfc, and so on where only added in c++11. We use them
   // as a golden reference to validate the results produced by Eigen. Therefore
   // we can only run these tests if we use a c++11 compiler.
@@ -1666,6 +1660,4 @@
   CALL_SUBTEST_6(test_gpu_gamma_sample_der_alpha<float>());
   CALL_SUBTEST_6(test_gpu_gamma_sample_der_alpha<double>());
 #endif
-
-#endif
 }
diff --git a/unsupported/test/idrstabl.cpp b/unsupported/test/idrstabl.cpp
new file mode 100644
index 0000000..7e40dd6
--- /dev/null
+++ b/unsupported/test/idrstabl.cpp
@@ -0,0 +1,28 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2011 Gael Guennebaud <g.gael@free.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "../../test/sparse_solver.h"
+#include <unsupported/Eigen/IterativeSolvers>
+
+template <typename T>
+void test_idrstabl_T() {
+  IDRSTABL<SparseMatrix<T>, DiagonalPreconditioner<T> > idrstabl_colmajor_diag;
+  IDRSTABL<SparseMatrix<T>, IncompleteLUT<T> > idrstabl_colmajor_ilut;
+
+  idrstabl_colmajor_diag.setTolerance(NumTraits<T>::epsilon() * 4);
+  idrstabl_colmajor_ilut.setTolerance(NumTraits<T>::epsilon() * 4);
+
+  CALL_SUBTEST(check_sparse_square_solving(idrstabl_colmajor_diag));
+  CALL_SUBTEST(check_sparse_square_solving(idrstabl_colmajor_ilut));
+}
+
+EIGEN_DECLARE_TEST(idrstabl) {
+  CALL_SUBTEST_1((test_idrstabl_T<double>()));
+  CALL_SUBTEST_2((test_idrstabl_T<std::complex<double> >()));
+}