#!/usr/bin/env expect
############################################################################
# Purpose: Test of Slurm functionality
# Test of the --cpus-per-task option on a single node.
#
# NOTE: This assumes node names are of the form <alpha><number>, where
# the value of <number> indicates the node's relative location.
# Change the node name parsing logic as needed for other formats.
############################################################################
# Copyright (C) 2002-2006 The Regents of the University of California.
# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
# Written by Morris Jette <jette1@llnl.gov>
# CODE-OCEC-09-009. All rights reserved.
#
# This file is part of Slurm, a resource management program.
# For details, see <https://slurm.schedmd.com/>.
# Please also read the included file: DISCLAIMER.
#
# Slurm is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along
# with Slurm; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
############################################################################
source ./globals
# Various configurations allocate nodes, sockets, cores, CPUs, or threads;
# not all of these are compatible with this test
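# (select/linear allocates whole nodes to jobs, so the per-CPU task counting
# used below does not apply)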
if {[check_config_select "linear"]} {
skip "This test is incompatible with select/linear"
}
# Check for OverSubscribe
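# (FORCE lets other jobs share the allocated CPUs, which would skew the
# task counts measured below)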
set oversubscribe [get_partition_param [default_partition] "OverSubscribe"]
if {$oversubscribe == "FORCE"} {
skip "This test is incompatible with forced sharing of nodes"
}
# Get an idle node and its main parameters
set nodes [get_nodes_by_state]
if {[llength $nodes] == 0} {
skip "Couldn't find an idle node in the default partition"
}
set host [lindex $nodes 0]
set cpu_cnt [get_node_param $host "CPUTot"]
set threads_per_core [get_node_param $host "ThreadsPerCore"]
set core_spec_cnt [get_node_param $host "CoreSpecCount"]
if {$core_spec_cnt == "MISSING"} {
set core_spec_cnt 0
}
log_debug "Found idle node $host with $cpu_cnt processors"
set task_cnt 0
spawn $srun -N1 -w$host --cpus-per-task=1 --exclusive -l -t1 $bin_printenv SLURM_PROCID
expect {
-re "($number):" {
incr task_cnt
exp_continue
}
-re "Task count specification invalid" {
skip "Nodes have too many CPUs for test"
}
timeout {
fail "srun not responding"
}
eof {
wait
}
}
# Remove CPUs on specialized cores (CoreSpecCount) from the usable CPU count
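# Example with hypothetical values: CPUTot=16, CoreSpecCount=2, ThreadsPerCore=2
# leaves 16 - (2 * 2) = 12 CPUs usable by the job step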
if {$core_spec_cnt != 0} {
set cpu_cnt [expr {$cpu_cnt - ($core_spec_cnt * $threads_per_core)}]
}
if {$cpu_cnt != $task_cnt} {
set core_cnt $cpu_cnt
if {$threads_per_core != 0} {
set cpu_cnt [expr {$cpu_cnt * $threads_per_core}]
}
if {$cpu_cnt == $task_cnt} {
log_debug "Allocated $core_cnt cores and $threads_per_core CPUs per core"
}
}
if {$cpu_cnt != $task_cnt} {
fail "Should have run $cpu_cnt tasks (one per core) instead of $task_cnt tasks. This could be due to memory limit per allocated CPU, MaxCPUsPerNode, CR_ONE_TASK_PER_CORE, or OverSubscribe=FORCE"
}
if {$cpu_cnt < 2} {
skip "The node only has one core"
}
#
# Now verify the --cpus-per-task option
#
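# With --cpus-per-task=2 each task is allocated two CPUs, so an exclusive
# run on the same node should start cpu_cnt / 2 tasks (verified below)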
set task_cnt 0
spawn $srun -N1 --cpus-per-task=2 --exclusive -l -t1 --nodelist=$host $bin_printenv SLURM_PROCID
expect {
-re "Invalid node name specified" {
skip "Appears you are using multiple slurmd testing. This test won't work in that fashion"
}
-re "$number: *($re_word_str)" {
incr task_cnt
exp_continue
}
timeout {
fail "srun not responding"
}
eof {
wait
}
}
#
# Verify that half the number of tasks were spawned
#
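# (e.g., with 12 usable CPUs and --cpus-per-task=2, expect 12 / 2 = 6 tasks)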
if {$task_cnt != [expr {$cpu_cnt / 2}]} {
fail "Improper task count for given cpus-per-task. This could be due to memory limit per allocated CPU or OverSubscribe=FORCE"
}