#!/usr/bin/env expect
############################################################################
# Purpose: Test of Slurm functionality
# Validate that a job is held and its reason set to
# launch_failed_requeued_held when the prolog fails
############################################################################
# Copyright (C) SchedMD LLC.
#
# This file is part of Slurm, a resource management program.
# For details, see <http://slurm.schedmd.com/>.
# Please also read the included file: DISCLAIMER.
#
# Slurm is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along
# with Slurm; if not, write to the Free Software Foundation, Inc.
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
############################################################################
source ./globals

set good_prolog "$test_dir/good_prolog"
set bad_prolog "$test_dir/bad_prolog"
set test_node [lindex [get_nodes_by_state] 0]
set job_id 0
set job_list {}
set test_script "$test_dir/script"

if {![is_super_user]} {
    skip "Test can only be run as SlurmUser"
}

proc cleanup { } {
    global job_list config_file scontrol test_node

    # Cancel any lingering jobs
    cancel_job $job_list

    # Restore the original slurm.conf and return the node to service
    restore_conf $config_file
    reconfigure
    run_command "$scontrol update node=$test_node state=idle"
}
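
# Create a prolog that succeeds, a prolog that fails, and a short job script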
make_bash_script $good_prolog "exit 0"
make_bash_script $bad_prolog "exit 1"
make_bash_script $test_script "sleep 20"
# Get the location of the slurm.conf file
set config_dir [get_conf_path]
set config_file $config_dir/slurm.conf
#
# Save a copy of slurm.conf so cleanup can restore it
#
save_conf $config_file

# Comment out any existing Prolog line and remove nohold_on_prolog_fail
# so the test starts from a known configuration
exec $bin_sed -i {s/^\(Prolog=\)/#\1/gI} $config_file
exec $bin_sed -i {s/nohold_on_prolog_fail//gI; s/^SchedulerParameters=$//gI} $config_file
# Append Prolog config to the slurm.conf
exec $bin_echo "Prolog=$good_prolog" >> $config_file
reconfigure -fail
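
# Submit a job to the test node while the passing prolog is configured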
spawn $sbatch -t1 -N1 -w$test_node --exclusive -o/dev/null $test_script
expect {
    -re "Submitted batch job ($number)" {
        set job_id $expect_out(1,string)
        lappend job_list $job_id
        exp_continue
    }
    timeout {
        fail "sbatch is not responding"
    }
    eof {
        wait
    }
}
if {$job_id == 0} {
    fail "sbatch did not submit job"
}
# Check that job starts running correctly
if {[wait_job_reason $job_id "RUNNING" "None"] != 0} {
    fail "Job ($job_id) should be Running and have reason: None"
}

# Swap the passing prolog for the failing one
exec $bin_sed -i {s/^\(Prolog=\)/#\1/gI} $config_file
exec $bin_echo "Prolog=$bad_prolog" >> $config_file
reconfigure -fail
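
# Submit a job that will trigger the failing prolog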
set job_id 0
spawn $sbatch -t1 -N1 -w$test_node --exclusive -o/dev/null $test_script
expect {
    -re "Submitted batch job ($number)" {
        set job_id $expect_out(1,string)
        lappend job_list $job_id
        exp_continue
    }
    timeout {
        fail "sbatch is not responding"
    }
    eof {
        wait
    }
}
if {$job_id == 0} {
    fail "sbatch did not submit job"
}
# Check that the job is held with reason launch_failed_requeued_held
# or JobHeldAdmin; either is possible due to race conditions
if {[wait_job_reason $job_id "PENDING" \
        {"launch_failed_requeued_held" "JobHeldAdmin"}] != 0} {
    fail "Job ($job_id) is not in the correct state or reason"
}
# Check that the node that the job ran on is in fact drained
set match 0
spawn $sinfo -h -o%T -n$test_node
expect {
    -re "drained" {
        set match 1
        exp_continue
    }
    timeout {
        fail "sinfo is not responding"
    }
    eof {
        wait
    }
}
if {$match == 0} {
    fail "Node ($test_node) was not drained when it should be"
}
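
# Return the drained node to service so the next submission can run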
log_info "RESUMING NODE"
run_command "$scontrol update node=$test_node state=RESUME"

# Append SchedulerParameters=nohold_on_prolog_fail so a failed prolog
# requeues the job without holding it
exec $bin_echo "SchedulerParameters=nohold_on_prolog_fail" >> $config_file
reconfigure -fail
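
# Submit a job that should stay eligible despite the failing prolog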
set job_id 0
spawn $sbatch -t1 -N1 -w$test_node --exclusive -o/dev/null $test_script
expect {
    -re "Submitted batch job ($number)" {
        set job_id $expect_out(1,string)
        lappend job_list $job_id
        exp_continue
    }
    timeout {
        fail "sbatch is not responding"
    }
    eof {
        wait
    }
}
if {$job_id == 0} {
    fail "sbatch did not submit job"
}
# Wait a bit for the pending reason to be set
if {[wait_job_reason $job_id] != 0} {
    fail "Error waiting for job ($job_id) to start"
}
# A held job has priority 0; with nohold_on_prolog_fail the priority
# should remain non-zero
set priority 0
spawn $squeue -h -j$job_id -o%t.%Q
expect {
    -re "(\[A-Z\]+)\\.($number)" {
        set priority $expect_out(2,string)
    }
    timeout {
        fail "squeue is not responding"
    }
    eof {
        wait
    }
}
subtest {$priority != 0} "Job with nohold_on_prolog_fail should not be held"