| #!/usr/bin/expect |
| ############################################################################ |
| # Purpose: Test of slaunch functionality |
| # Test of HOSTFILE environment variable. |
| # |
| # Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR |
| # "FAILURE: ..." otherwise with an explanation of the failure, OR |
| # anything else indicates a failure mode that must be investigated. |
| ############################################################################ |
| # Copyright (C) 2002-2006 The Regents of the University of California. |
| # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). |
| # Written by Danny Auble <da@llnl.gov> |
| # UCRL-CODE-226842. |
| # |
| # This file is part of SLURM, a resource management program. |
| # For details, see <http://www.llnl.gov/linux/slurm/>. |
| # |
| # SLURM is free software; you can redistribute it and/or modify it under |
| # the terms of the GNU General Public License as published by the Free |
| # Software Foundation; either version 2 of the License, or (at your option) |
| # any later version. |
| # |
| # SLURM is distributed in the hope that it will be useful, but WITHOUT ANY |
| # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS |
| # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more |
| # details. |
| # |
| # You should have received a copy of the GNU General Public License along |
| # with SLURM; if not, write to the Free Software Foundation, Inc., |
| # 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. |
| ############################################################################ |
source ./globals

set test_id    "18.25"
set exit_code  0
set node_count 0
set job_id     0
set hostfile   "test$test_id.hostfile"

print_header $test_id

# Remove any hostfile left over from a previous run of this test.
exec $bin_rm -f $hostfile

# Task placement on specific nodes is not possible on front-end systems,
# so skip (exit 0) rather than fail there.
if { [test_front_end] } {
	send_user "\nWARNING: This test incompatible with front-end systems\n"
	exit $exit_code
}

# The Elan switch imposes its own task placement constraints; skip.
if {[string compare [switch_type] "elan"] == 0} {
	send_user "\nWARNING: This test incompatible with elan switch\n"
	exit $exit_code
}

# Find out if we have enough nodes to test functionality; the layout
# reversal below needs at least two distinct nodes.
set node_count [available_nodes [default_partition]]
if { $node_count < 2 } {
	send_user "WARNING: system must have at least 2 nodes to run this test.\n"
	exit $exit_code
}
| |
set timeout $max_job_delay
# Create a two-node allocation; "sleep 30" keeps it alive long enough to
# launch the test steps under it.
set salloc_pid [spawn $salloc -N2 sleep 30]
set salloc_spawn_id $spawn_id
expect {
	-re "Granted job allocation ($number)" {
		# Record the allocation's job id.  Set BOTH variables: $jobid is
		# read by the slaunch commands below, while $job_id is what the
		# timeout-cleanup path checks before calling cancel_job.  (The
		# original code set only $jobid, so cancel_job could never run.)
		set job_id $expect_out(1,string)
		set jobid  $job_id
	}
	timeout {
		send_user "\nFAILURE: salloc not responding\n"
		if {$job_id != 0} {
			cancel_job $job_id
		}
		# Negative pid: signal salloc's whole process group.
		slow_kill [expr 0 - $salloc_pid]
		exit 1
	}
}
| |
| # |
| # First launch 2 tasks on 2 nodes, and find out the default task layout order |
| # |
| set node0 0 |
| set node1 0 |
| spawn $slaunch --jobid $jobid -l $bin_printenv SLURMD_NODENAME |
| expect { |
| -re "($number): ($alpha_numeric)" { |
| set task_id $expect_out(1,string) |
| if {$task_id == 0} { |
| set node0 $expect_out(2,string) |
| } else { |
| set node1 $expect_out(2,string) |
| } |
| exp_continue |
| } |
| eof { |
| wait |
| } |
| } |
| |
| |
| # |
| # Then create a hostfile laying out the tasks in the opposite order of |
| # the default. |
| # |
| if { $node0 == 0 || $node1 == 0 } { |
| send_user "\nFAILURE: node names not set from previous srun\n" |
| exit 1 |
| } |
| set env(SLURM_HOSTFILE) $hostfile |
| set 1node0 $node0 |
| set 1node1 $node1 |
| set file [open $hostfile "w"] |
| puts $file "$node1" |
| puts $file "$node0" |
| close $file |
| |
| # |
| # slaunch the tasks using the hostfile |
| # |
| spawn $slaunch --jobid $jobid -l --task-layout-file $hostfile $bin_printenv SLURMD_NODENAME |
| expect { |
| -re "($number): ($alpha_numeric)" { |
| set task_id $expect_out(1,string) |
| if {$task_id == 0} { |
| set node0 $expect_out(2,string) |
| } else { |
| set node1 $expect_out(2,string) |
| } |
| exp_continue |
| } |
| eof { |
| wait |
| } |
| } |
| |
| # |
| # Kill salloc (by signalling its process group and killing the sleep 30) |
| # |
| exec $bin_kill -s SIGINT -$salloc_pid |
| set spawn_id $salloc_spawn_id |
| expect { |
| eof { |
| wait |
| } |
| } |
| |
#
# Verify the hostfile reversed the layout: task 0 should now be on the
# node where task 1 ran by default, and vice versa.
# [string compare] returns non-zero when the strings differ.
# (The first message previously read "\n FAILURE:" with a stray space,
# which would defeat harnesses scanning for "FAILURE:".)
#
if { [string compare $node0 $1node1] } {
	send_user "\nFAILURE: tasks not distributed by hostfile\n"
	set exit_code 1
} elseif { [string compare $node1 $1node0] } {
	send_user "\nFAILURE: tasks not distributed by hostfile\n"
	set exit_code 1
}
if {$exit_code == 0} {
	# Remove the hostfile only on success, leaving it for inspection
	# when the test fails.
	exec $bin_rm -f $hostfile
	send_user "\nSUCCESS\n"
}
exit $exit_code