From 487f56284504c89c0d3cd73ce4b50528c64352be Mon Sep 17 00:00:00 2001
From: Alex Lindsay
Date: Tue, 6 May 2025 16:12:27 -0600
Subject: [PATCH 1/3] If user hasn't specified mpi thread type provide default with slate

Slate requires MPI_THREAD_MULTIPLE so we'll request this level if the user
hasn't requested otherwise
---
 src/base/libmesh.C | 18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)

diff --git a/src/base/libmesh.C b/src/base/libmesh.C
index fbc903e607..df410ca5a5 100644
--- a/src/base/libmesh.C
+++ b/src/base/libmesh.C
@@ -442,30 +442,32 @@ LibMeshInit::LibMeshInit (int argc, const char * const * argv,
   int mpi_thread_request = using_threads;
   const auto mpi_thread_type =
       libMesh::command_line_value("--mpi-thread-type", std::string(""));
   if (mpi_thread_type.empty())
-    check_empty_command_line_value(*command_line, "--mpi-thread-type");
+    {
+      check_empty_command_line_value(*command_line, "--mpi-thread-type");
+#if defined(PETSC_HAVE_STRUMPACK) && defined(PETSC_HAVE_SLATE)
+      mpi_thread_request = 3;
+#endif
+    }
   else
     {
-      int cli_mpi_thread_request;
       if (mpi_thread_type == "single")
         {
          if (using_threads)
            libmesh_error_msg("We are using threads, so we require more mpi thread support "
                              "than '--mpi-thread-type=single'");
-          cli_mpi_thread_request = 0;
+          mpi_thread_request = 0;
        }
      else if (mpi_thread_type == "funneled")
-        cli_mpi_thread_request = 1;
+        mpi_thread_request = 1;
      else if (mpi_thread_type == "serialized")
-        cli_mpi_thread_request = 2;
+        mpi_thread_request = 2;
      else if (mpi_thread_type == "multiple")
-        cli_mpi_thread_request = 3;
+        mpi_thread_request = 3;
      else
        libmesh_error_msg(
            "Unsupported mpi thread type '" << mpi_thread_type
            << "'. Allowed options are 'single', 'funneled', 'serialized', and 'multiple'");
-
-      mpi_thread_request = cli_mpi_thread_request;
    }
 
   this->_timpi_init =

From 4a012a7a0d01ed1047d353a39c85b9698ff26a86 Mon Sep 17 00:00:00 2001
From: Alex Lindsay
Date: Wed, 7 May 2025 17:36:32 -0600
Subject: [PATCH 2/3] Add comment

---
 src/base/libmesh.C | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/base/libmesh.C b/src/base/libmesh.C
index df410ca5a5..5f89b3042e 100644
--- a/src/base/libmesh.C
+++ b/src/base/libmesh.C
@@ -445,6 +445,7 @@ LibMeshInit::LibMeshInit (int argc, const char * const * argv,
     {
       check_empty_command_line_value(*command_line, "--mpi-thread-type");
 #if defined(PETSC_HAVE_STRUMPACK) && defined(PETSC_HAVE_SLATE)
+      // Slate always requests MPI_THREAD_MULTIPLE
       mpi_thread_request = 3;
 #endif
     }

From 6eac17393ecdc43e0c54a0321b2e29efe476f1ae Mon Sep 17 00:00:00 2001
From: Alex Lindsay
Date: Wed, 7 May 2025 17:45:10 -0600
Subject: [PATCH 3/3] Add more comment

---
 src/base/libmesh.C | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/src/base/libmesh.C b/src/base/libmesh.C
index 5f89b3042e..a2c1fa3419 100644
--- a/src/base/libmesh.C
+++ b/src/base/libmesh.C
@@ -445,7 +445,12 @@ LibMeshInit::LibMeshInit (int argc, const char * const * argv,
     {
       check_empty_command_line_value(*command_line, "--mpi-thread-type");
 #if defined(PETSC_HAVE_STRUMPACK) && defined(PETSC_HAVE_SLATE)
-      // Slate always requests MPI_THREAD_MULTIPLE
+      // For GPU computations, the solver strumpack uses slate, which always requests
+      // MPI_THREAD_MULTIPLE. The solution here is not perfect because the run may never
+      // use strumpack, but we believe it's better to force the MPI library to use locks
+      // whenever it accesses the message queue, that is, when processing any sends and
+      // receives, than it is to require users to pre-announce/signal what solvers they
+      // are using through --mpi-thread-type
       mpi_thread_request = 3;
 #endif
     }
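For context, here is a minimal, illustrative sketch (not part of the patches) of what the integer request levels 0-3 used above mean once they reach MPI. In libMesh the request is actually forwarded through TIMPI during LibMeshInit, so this standalone program is only an assumption-level sketch of the underlying MPI_Init_thread semantics that slate's MPI_THREAD_MULTIPLE requirement relies on.

// Standalone sketch, not libMesh code: how a thread-support request reaches MPI.
// The integer levels used in the patch map onto MPI's constants:
//   0 -> MPI_THREAD_SINGLE, 1 -> MPI_THREAD_FUNNELED,
//   2 -> MPI_THREAD_SERIALIZED, 3 -> MPI_THREAD_MULTIPLE
#include <mpi.h>
#include <cstdio>

int main(int argc, char ** argv)
{
  const int requested = MPI_THREAD_MULTIPLE; // the level slate needs

  int provided = MPI_THREAD_SINGLE;
  MPI_Init_thread(&argc, &argv, requested, &provided);

  // The MPI library may grant less than requested; a slate-backed solve is only
  // safe if MPI_THREAD_MULTIPLE was actually provided.
  if (provided < requested)
    std::printf("requested thread level %d, but MPI provided only %d\n",
                requested, provided);

  MPI_Finalize();
  return 0;
}

Note that the #if guard in the patch means this stricter default is requested only when PETSc was configured with both strumpack and slate; otherwise the default request remains whatever using_threads implies.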