blob: 374993985ed9b53ce504c4853f7c7957d650f9db [file] [log] [blame]
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_PARALLELIZER_H
#define EIGEN_PARALLELIZER_H
#ifdef EIGEN_HAS_OPENMP
#ifndef EIGEN_OPENMP_ENV_GET_MAX_THREADS
// By default, call the OpenMP runtime to get the maximum number of threads.
#define MAX_OPENMP_THREADS_FUNC omp_get_max_threads
#else
// We have been given an external function to call to get the maximum number
// of threads.
#ifdef EIGEN_OPENMP_ENV_NAMESPACE
namespace EIGEN_OPENMP_ENV_NAMESPACE {
extern int EIGEN_OPENMP_ENV_GET_MAX_THREADS();
}
#define MAX_OPENMP_THREADS_FUNC ::EIGEN_OPENMP_ENV_NAMESPACE::EIGEN_OPENMP_ENV_GET_MAX_THREADS
#else
extern int EIGEN_OPENMP_ENV_GET_MAX_THREADS();
#define MAX_OPENMP_THREADS_FUNC EIGEN_OPENMP_ENV_GET_MAX_THREADS
#endif // EIGEN_OPENMP_ENV_NAMESPACE
#endif // !EIGEN_OPENMP_ENV_GET_MAX_THREADS
#endif // EIGEN_HAS_OPENMP
namespace Eigen {
namespace internal {
// Accessor/mutator for the process-wide cap on the number of threads Eigen
// may use. When `set` is true the cap is replaced by `new_max` before being
// returned; otherwise the current value is returned untouched. In builds
// without OpenMP the cap is the constant 1 and both arguments are ignored.
inline int getAndMaybeSetMaxThreads(int new_max, bool set) {
#ifdef EIGEN_HAS_OPENMP
  // Lazily seeded on first use from the OpenMP runtime (or from the
  // user-supplied EIGEN_OPENMP_ENV_GET_MAX_THREADS hook).
  static int max_threads_cache = MAX_OPENMP_THREADS_FUNC();
  if (set) max_threads_cache = new_max;
  return max_threads_cache;
#else
  // Single-threaded build: nothing to store, nothing to read.
  EIGEN_UNUSED_VARIABLE(new_max);
  EIGEN_UNUSED_VARIABLE(set);
  return 1;
#endif
}
} // namespace internal
/** \returns the maximum number of threads Eigen is allowed to use.
    This is always 1 unless EIGEN_HAS_OPENMP is defined.
  * \sa setNbThreads */
inline int nbThreads() {
  // Query-only call: the first argument is ignored when `set` is false.
  return internal::getAndMaybeSetMaxThreads(/*new_max=*/0, /*set=*/false);
}
/** Sets the maximum number of threads Eigen is allowed to use.
    This is a no-op unless EIGEN_HAS_OPENMP is defined.
  * \sa nbThreads */
inline void setNbThreads(int new_max_threads) {
  // Mutating call: `set == true` stores the new cap; the returned value
  // is deliberately discarded.
  internal::getAndMaybeSetMaxThreads(new_max_threads, /*set=*/true);
}
/** Eagerly initializes Eigen's lazily-created global state -- the thread
    cap and the cached cache-size table -- so that later (possibly
    concurrent) calls find it already constructed. */
inline void initParallel() {
  // Touch the thread cap so its function-local static is initialized now.
  internal::getAndMaybeSetMaxThreads(0, false);
  // Likewise force the cache-size table to be computed and cached.
  std::ptrdiff_t level1, level2, level3;
  internal::manage_caching_sizes(GetAction, &level1, &level2, &level3);
}
namespace internal {
// Per-thread coordination record for the parallel GEMM below: one entry per
// OpenMP thread is allocated by parallelize_gemm() and the whole array is
// handed to the product functor.
template <typename Index>
struct GemmParallelInfo {
// Everything starts "idle": sync = -1, no users, empty lhs slice.
GemmParallelInfo() : sync(-1), users(0), lhs_start(0), lhs_length(0) {}
// NOTE(review): `volatile` here predates C++11 atomics and provides neither
// atomicity nor memory ordering; the functor's synchronization protocol is
// not visible from this file -- confirm it tolerates that.
int volatile sync;
int volatile users;
// Slice of the lhs assigned to this thread; filled in by parallelize_gemm()
// inside the parallel region before the functor is invoked.
Index lhs_start;
Index lhs_length;
};
// Runs `func` over the whole product, splitting the work across OpenMP
// threads when (a) OpenMP support is compiled in, (b) `Condition` is true,
// (c) we are not already inside a parallel region, and (d) the product is
// large enough to amortize the threading overhead. Otherwise `func` is
// invoked once, serially, on the full range.
//
// \param func       product kernel, called as func(row0, nrows, col0, ncols)
//                   or, when parallelized, with a trailing GemmParallelInfo*.
// \param rows,cols,depth  dimensions of the product
// \param transpose  true for row-major destinations: the split is then done
//                   along the other dimension and rows/cols are swapped.
template <bool Condition, typename Functor, typename Index>
void parallelize_gemm(const Functor& func, Index rows, Index cols, Index depth,
                      bool transpose) {
  // TODO when EIGEN_USE_BLAS is defined,
  // we should still enable OMP for other scalar types
#if !(defined(EIGEN_HAS_OPENMP)) || defined(EIGEN_USE_BLAS)
  // FIXME the transpose variable is only needed to properly split
  // the matrix product when multithreading is enabled. This is a temporary
  // fix to support row-major destination matrices. This whole
  // parallelizer mechanism has to be redesigned anyway.
  EIGEN_UNUSED_VARIABLE(transpose);
  func(0, rows, 0, cols);
#else
  // Dynamically check whether we should enable or disable OpenMP.
  // The conditions are:
  // - the max number of threads we can create is greater than 1
  // - we are not already in a parallel code
  // - the sizes are large enough

  // 1- are we already in a parallel session? If so (or if the caller
  // statically disabled threading via Condition), run serially.
  if ((!Condition) || (omp_get_num_threads() > 1))
    return func(0, rows, 0, cols);

  Index size = transpose ? rows : cols;

  // 2- compute the maximal number of threads from the size of the product:
  // FIXME this has to be fine tuned
  Index max_threads = std::max<Index>(1, size / 32);

  // 3- compute a maximum number of threads based on the total amount of work.
  double work = static_cast<double>(rows) * static_cast<double>(cols) *
                static_cast<double>(depth);
  const double kMinTaskSize = 50000;  // Heuristic: min work units per thread.
  max_threads =
      std::max<Index>(1, std::min<Index>(max_threads, work / kMinTaskSize));

  // 4 - compute the number of threads we are going to use
  Index threads = std::min<Index>(nbThreads(), max_threads);
  if (threads == 1) return func(0, rows, 0, cols);

  func.initParallelSession();

  if (transpose) std::swap(rows, cols);

  // Split into `threads` blocks. Block sizes are rounded down to multiples
  // of 4 columns / Functor::Traits::mr rows; the last thread picks up the
  // remainder (see actualBlockRows/actualBlockCols below).
  Index blockCols = (cols / threads) & ~Index(0x3);
  Index blockRows = (rows / threads);
  blockRows = (blockRows / Functor::Traits::mr) * Functor::Traits::mr;

  // NOTE(review): raw new[]/delete[] leaks if the functor throws; the array
  // must outlive the whole parallel region, so an RAII owner (or a stack
  // buffer) would be the safe replacement -- left as-is pending redesign.
  GemmParallelInfo<Index>* info = new GemmParallelInfo<Index>[ threads ];

#pragma omp parallel num_threads(threads)
  {
    Index i = omp_get_thread_num();
    Index r0 = i * blockRows;
    Index actualBlockRows = (i + 1 == threads) ? rows - r0 : blockRows;

    Index c0 = i * blockCols;
    Index actualBlockCols = (i + 1 == threads) ? cols - c0 : blockCols;

    // Publish this thread's lhs slice so peer threads can coordinate on it.
    info[i].lhs_start = r0;
    info[i].lhs_length = actualBlockRows;

    if (transpose)
      func(c0, actualBlockCols, 0, rows, info);
    else
      func(0, rows, c0, actualBlockCols, info);
  }

  delete[] info;
#endif
}

// The helper macro is no longer needed past this point. Undefine it
// unconditionally: the original #undef lived inside the OpenMP-only branch
// above, so the macro leaked into user code whenever EIGEN_HAS_OPENMP was
// combined with EIGEN_USE_BLAS.
#ifdef MAX_OPENMP_THREADS_FUNC
#undef MAX_OPENMP_THREADS_FUNC
#endif
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_PARALLELIZER_H