| #!/usr/bin/env expect |
| ############################################################################ |
| # Purpose: Test of Slurm functionality |
| # Confirm that a job executes with the proper node count |
| # (--nodes option). |
| ############################################################################ |
| # Copyright (C) 2002-2007 The Regents of the University of California. |
| # Copyright (C) 2008-2010 Lawrence Livermore National Security. |
# Produced at Lawrence Livermore National Laboratory (cf. DISCLAIMER).
| # Written by Morris Jette <jette1@llnl.gov> |
| # CODE-OCEC-09-009. All rights reserved. |
| # |
| # This file is part of Slurm, a resource management program. |
| # For details, see <https://slurm.schedmd.com/>. |
| # Please also read the included file: DISCLAIMER. |
| # |
| # Slurm is free software; you can redistribute it and/or modify it under |
| # the terms of the GNU General Public License as published by the Free |
| # Software Foundation; either version 2 of the License, or (at your option) |
| # any later version. |
| # |
| # Slurm is distributed in the hope that it will be useful, but WITHOUT ANY |
| # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS |
| # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more |
| # details. |
| # |
| # You should have received a copy of the GNU General Public License along |
| # with Slurm; if not, write to the Free Software Foundation, Inc., |
| # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. |
| ############################################################################ |
| source ./globals |
| |
| set job_id 0 |
| |
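# Called automatically when the test exits; cancel any job still active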
| proc cleanup {} { |
| global job_id |
| |
| cancel_job $job_id |
| } |
| |
| # |
| # Submit a 1 node job and validate that we don't get more than one |
| # |
| set host_0 "" |
| set task_cnt 0 |
| set timeout $max_job_delay |
| spawn $salloc -N1-1 -t1 $srun -c 1 -l $bin_printenv SLURMD_NODENAME |
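# With "srun -l", each output line is prefixed with its task rank, so the
# task output below has the form "<rank>: <hostname>"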
| expect { |
| -re "Granted job allocation ($number)" { |
| set job_id $expect_out(1,string) |
| exp_continue |
| } |
| -re "(Task count specification invalid|configuration not available)" { |
| skip "Test not compatible with this configuration" |
| } |
| -re "($number): *($re_word_str)" { |
| if {$task_cnt == 0} { |
| set host_0 $expect_out(2,string) |
| } |
| incr task_cnt |
| exp_continue |
| } |
| timeout { |
| fail "salloc not responding" |
| } |
| eof { |
| wait |
| } |
| } |
| |
| if {$host_0 eq ""} { |
| fail "Did not get SLURMD_NODENAME of task 0" |
| } |
| |
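#
# Request an invalid node name and one more task than the processors
# counted above; validate that the allocation is rejected and no step runs
#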
| set alloc_fail 0 |
| set job_id 0 |
| set task_cnt2 0 |
spawn $salloc -N1-1 -w DUMMY_HOSTNAME -t1 $srun -n [expr {$task_cnt + 1}] -l $bin_printenv SLURMD_NODENAME
| expect { |
| -re "Granted job allocation ($number)" { |
| set job_id $expect_out(1,string) |
| exp_continue |
| } |
| -re "($number):" { |
| incr task_cnt2 |
| exp_continue |
| } |
| -re "Invalid node name specified" { |
| log_info "This error is expected, no worries" |
| set alloc_fail 1 |
| } |
| timeout { |
| fail "salloc not responding" |
| } |
| eof { |
| wait |
| } |
| } |
| subtest {$alloc_fail != 0} "Verify error message was received" |
| subtest {$task_cnt2 == 0} "Verify no steps ran" "Allocated more tasks than processors" |
| |
| # |
| # Submit a 1 node job |
| # |
| set job_id 0 |
| set host_0 "" |
| set host_1 "" |
| |
| spawn $salloc -N1-1 -t1 $srun -l $bin_printenv SLURMD_NODENAME |
| expect { |
| -re "Granted job allocation ($number)" { |
| set job_id $expect_out(1,string) |
| exp_continue |
| } |
| -re "($number): *($re_word_str)" { |
| if {$expect_out(1,string) == 0} { |
| set host_0 $expect_out(2,string) |
| } |
| if {$expect_out(1,string) == 1} { |
| set host_1 $expect_out(2,string) |
| } |
| exp_continue |
| } |
| timeout { |
| fail "salloc not responding" |
| } |
| eof { |
| wait |
| } |
| } |
| |
| # |
| # Verify node count |
| # |
| subtest {$host_0 ne ""} "A task should run on the first host" |
| subtest {$host_1 eq ""} "Only a single task should run" "A task was found running on the second host $host_1" |
| |
| # |
| # Submit a 1 to 3 node job |
| # |
| set job_id 0 |
| set host_0 "" |
| set host_1 "" |
| set host_2 "" |
| set host_3 "" |
| set timeout $max_job_delay |
| spawn $salloc -N1-3 -t1 $srun -l $bin_printenv SLURMD_NODENAME |
| expect { |
| -re "Granted job allocation ($number)" { |
| set job_id $expect_out(1,string) |
| exp_continue |
| } |
| -re "($number): *($re_word_str)" { |
| if {$expect_out(1,string) == 0} { |
| set host_0 $expect_out(2,string) |
| } |
| if {$expect_out(1,string) == 1} { |
| set host_1 $expect_out(2,string) |
| } |
| if {$expect_out(1,string) == 2} { |
| set host_2 $expect_out(2,string) |
| } |
| if {$expect_out(1,string) == 3} { |
| set host_3 $expect_out(2,string) |
| } |
| exp_continue |
| } |
| timeout { |
| fail "salloc not responding" |
| } |
| eof { |
| wait |
| } |
| } |
| |
| # |
| # Verify node count |
| # |
| subtest {$host_0 ne ""} "A task should run on the first host" |
| subtest {$host_3 eq ""} "Only three tasks should run" "A task was found running on the fourth host $host_3" |
| |
# Verify that the node running task 0 was not re-used for another task
if {$host_0 eq $host_1 || $host_0 eq $host_2 || $host_0 eq $host_3} {
	fail "Re-used a node in the allocation"
}
| |
# Find out if we have enough nodes to test multi-node allocations
set node_count [get_partition_param [default_partition] "TotalNodes"]
if {$node_count < 2} {
	skip "Insufficient nodes in default partition to continue ($node_count < 2)"
}
| |
| # |
| # Submit a 2 to 3 node job |
| # |
| set job_id 0 |
| set host_0 "" |
| set host_1 "" |
| set host_2 "" |
| set host_3 "" |
| set timeout $max_job_delay |
| spawn $salloc -N2-3 -t1 $srun -l $bin_printenv SLURMD_NODENAME |
| expect { |
| -re "Granted job allocation ($number)" { |
| set job_id $expect_out(1,string) |
| exp_continue |
| } |
| -re "salloc: error" { |
| cancel_job $job_id |
| skip "Can't test salloc task distribution" |
| } |
| -re "($number): *($re_word_str)" { |
| if {$expect_out(1,string) == 0} { |
| set host_0 $expect_out(2,string) |
| } |
| if {$expect_out(1,string) == 1} { |
| set host_1 $expect_out(2,string) |
| } |
| if {$expect_out(1,string) == 2} { |
| set host_2 $expect_out(2,string) |
| } |
| if {$expect_out(1,string) == 3} { |
			set host_3 $expect_out(2,string)
| } |
| exp_continue |
| } |
| timeout { |
| fail "salloc not responding" |
| } |
| eof { |
| wait |
| } |
| } |
| |
| # |
| # Verify node count |
| # |
| subtest {$host_0 ne ""} "A task should run on the first host" |
| subtest {$host_1 ne ""} "A task should run on the second host" |
| subtest {$host_3 eq ""} "Only three tasks should run" "A task was found running on the fourth host $host_3" |
| |
# Verify that the node running task 0 was not re-used for another task
if {$host_0 eq $host_1 || $host_0 eq $host_2 || $host_0 eq $host_3} {
	fail "Re-used a node in the allocation"
}