#!/usr/bin/expect
############################################################################
# Purpose: Test of SLURM functionality
# Test of slaunch --cpus-per-task option.
#
# Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR
# "WARNING: ..." with an explanation of why the test can't be made, OR
# "FAILURE: ..." otherwise with an explanation of the failure, OR
# anything else indicates a failure mode that must be investigated.
############################################################################
# Copyright (C) 2002-2006 The Regents of the University of California.
# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
# Written by Morris Jette <jette1@llnl.gov>
# UCRL-CODE-226842.
#
# This file is part of SLURM, a resource management program.
# For details, see <http://www.llnl.gov/linux/slurm/>.
#
# SLURM is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along
# with SLURM; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
############################################################################
source ./globals
set test_id "18.29"
set exit_code 0
set file_in "test$test_id.input"
print_header $test_id
if {[test_front_end]} {
    send_user "\nWARNING: This test is incompatible with FRONT_END systems\n"
    exit 0
}
if {[test_multiple_slurmd] != 0} {
    send_user "\nWARNING: This test is incompatible with multiple slurmd systems\n"
    exit 0
}
set cpu_cnt 0
set job_id 0
set timeout $max_job_delay
set node_cnt 2
set available [available_nodes [default_partition]]
if {$available < 2} {
    send_user "\nWARNING: not enough nodes currently available"
    send_user " ($available available, 2 needed)\n"
    exit $exit_code
}
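# Build a small input script that reports the allocated CPUs per node.
# Note: this script is not launched below; it is only removed during cleanup.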
make_bash_script $file_in "
    env | grep SLURM_JOB_CPUS_PER_NODE
    $bin_hostname"
# Script will print SLURM_JOB_CPUS_PER_NODE, then hold the allocation
# for a long time.
set script_name "test$test_id.sh"
make_bash_script $script_name {
    printenv SLURM_JOB_CPUS_PER_NODE
    sleep 600
}
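# Allocate the nodes and launch one instance of the script; its sleep keeps
# the allocation alive while the slaunch steps below run against its job id.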
set salloc_pid [spawn $salloc -N $node_cnt $slaunch -n 1 $script_name]
set salloc_spawn_id $spawn_id
expect {
    -re "Granted job allocation ($number)" {
        set job_id $expect_out(1,string)
    }
    timeout {
        send_user "\nFAILURE: salloc not responding\n"
        if {$job_id != 0} {
            cancel_job $job_id
        }
        slow_kill [expr 0 - $salloc_pid]
        exit 1
    }
}
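# The first line the script prints is the SLURM_JOB_CPUS_PER_NODE value;
# capture its leading number as the per-node CPU count.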
expect {
    -re "($number)" {
        set cpu_cnt $expect_out(1,string)
    }
    timeout {
        send_user "\nFAILURE: salloc not responding\n"
        cancel_job $job_id
        slow_kill [expr 0 - $salloc_pid]
        exit 1
    }
}
file delete $script_name
send_user "cpu count is $cpu_cnt\n"
# Check CPU count
if {$cpu_cnt < 2} {
    send_user "\nWARNING: The node only has one CPU\n"
    cancel_job $job_id
    exec $bin_rm -f $file_in
    exit $exit_code
}
#
# Now start the real testing of --cpus-per-task.
#
#
# Test A
# Should run: --cpus-per-task=1, -n=(node count * cpu count)
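# With one CPU per task, (node count * cpu count) tasks exactly fill every
# CPU in the allocation, so all tasks should launch.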
#
set task_cnt 0
set pid [spawn $slaunch --jobid $job_id -l --cpus-per-task 1 -n [expr $node_cnt * $cpu_cnt] $bin_printenv SLURMD_NODENAME]
expect {
    -re "($number): ($alpha_numeric)" {
        incr task_cnt
        exp_continue
    }
    eof {
        wait
    }
}
set expected_task_cnt [expr $node_cnt * $cpu_cnt]
if {$task_cnt < $expected_task_cnt} {
    send_user "\nFAILURE Test A: Fewer tasks ($task_cnt) than expected ($expected_task_cnt)\n"
    set exit_code 1
}
if {$task_cnt > $expected_task_cnt} {
    send_user "\nFAILURE Test A: More tasks ($task_cnt) than expected ($expected_task_cnt)\n"
    set exit_code 1
}
#
# Test B
# Should NOT run: --cpus-per-task=(cpu count), -n=(node count * cpu count)
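# Each task asks for (cpu count) CPUs, so this step would need (cpu count)
# times the CPUs actually allocated; step creation should fail.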
#
set task_cnt 0
set pid [spawn $slaunch --jobid $job_id -l --cpus-per-task $cpu_cnt -n [expr $node_cnt * $cpu_cnt] $bin_printenv SLURMD_NODENAME]
expect {
    -re "($number): ($alpha_numeric)" {
        incr task_cnt
        exp_continue
    }
    "error: Failed creating job step context" {
        send_user "This error was expected!\n"
        exp_continue
    }
    eof {
        wait
    }
}
if {$task_cnt > 0} {
    send_user "\nFAILURE Test B: Step should NOT have run ($task_cnt tasks launched)\n"
    set exit_code 1
}
#
# Test C
# Should run: --cpus-per-task=(cpu count), -n=(node count)
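# (node count) tasks at (cpu count) CPUs apiece exactly matches the
# allocation, so all tasks should launch.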
#
set task_cnt 0
set pid [spawn $slaunch --jobid $job_id -l --cpus-per-task $cpu_cnt -n $node_cnt $bin_printenv SLURMD_NODENAME]
expect {
    -re "($number): ($alpha_numeric)" {
        incr task_cnt
        exp_continue
    }
    eof {
        wait
    }
}
set expected_task_cnt $node_cnt
if {$task_cnt < $expected_task_cnt} {
    send_user "\nFAILURE Test C: Fewer tasks ($task_cnt) than expected ($expected_task_cnt)\n"
    set exit_code 1
}
if {$task_cnt > $expected_task_cnt} {
    send_user "\nFAILURE Test C: More tasks ($task_cnt) than expected ($expected_task_cnt)\n"
    set exit_code 1
}
#
# Clean up the job allocation.
#
cancel_job $job_id
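# Reattach to the salloc session and wait for it to exit now that its job
# has been cancelled.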
set spawn_id $salloc_spawn_id
expect {
    eof {
        wait
    }
}
if {$exit_code == 0} {
    exec $bin_rm -f $file_in
    send_user "\nSUCCESS\n"
}
exit $exit_code