| #!/usr/bin/env expect |
| ############################################################################ |
| # Purpose: Test of Slurm functionality |
| # Validate that salloc --exclusive with -n allocates all CPUs and GRES on the node
| ############################################################################ |
| # Copyright (C) SchedMD LLC. |
| # |
| # This file is part of Slurm, a resource management program. |
| # For details, see <https://slurm.schedmd.com/>. |
| # Please also read the included file: DISCLAIMER. |
| # |
| # Slurm is free software; you can redistribute it and/or modify it under |
| # the terms of the GNU General Public License as published by the Free |
| # Software Foundation; either version 2 of the License, or (at your option) |
| # any later version. |
| # |
| # Slurm is distributed in the hope that it will be useful, but WITHOUT ANY |
| # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS |
| # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more |
| # details. |
| # |
| # You should have received a copy of the GNU General Public License along |
| # with Slurm; if not, write to the Free Software Foundation, Inc. |
| # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. |
| ############################################################################ |
| source ./globals |
| |
| set job_id 0 |
| set nodes "" |
| set cputot 0 |
| set scontrol_cpu 0 |
| set sacct_cpu 0 |
| set gpu_tot 0 |
| set job_tres_cnt 0 |
| set timeout $max_job_delay |
| |
| proc cleanup {} { |
| global job_id |
| |
| cancel_job $job_id |
| } |
| |
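| # Verify that the job was allocated every CPU on its node by comparing the
| # node's CPUTot against the allocated CPU counts reported by both
| # scontrol show job and sacct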
| proc check_alloc {} {
| 	global sacct cputot scontrol_cpu sacct_cpu job_id
| |
| set node_name [get_job_param $job_id NodeList] |
| |
| # Get number of CPUs from scontrol show job |
| set scontrol_cpu [get_job_param $job_id NumCPUs] |
| if {$scontrol_cpu == 0} { |
| 		fail "scontrol reported an invalid number of CPUs ($scontrol_cpu) for job $job_id"
| } |
| |
| # Wait for sacct to populate the job completion record |
| wait_for_job_acct -fail $job_id end |
| |
| # Get number of allocated CPUs from sacct |
| set sacct_cpu [string trim [run_command_output -fail "$sacct --job=$job_id --allocation --format AllocCPUS --noheader"]] |
| if {$sacct_cpu == 0} { |
| 		fail "sacct reported an invalid number of allocated CPUs ($sacct_cpu) for job $job_id"
| } |
| |
| # Get total number of CPUs from scontrol show node |
| set cputot [get_node_param $node_name CPUTot] |
| |
| 	subtest {$cputot == $scontrol_cpu} "Verify allocated CPUs reported by scontrol" "scontrol reported $scontrol_cpu CPUs allocated to job $job_id when it should have been allocated all $cputot CPUs on the node"
| 	subtest {$cputot == $sacct_cpu} "Verify allocated CPUs reported by sacct" "sacct reported $sacct_cpu CPUs allocated to job $job_id when it should have been allocated all $cputot CPUs on the node"
| } |
| |
| if {[get_config_param "AccountingStorageType"] ne "accounting_storage/slurmdbd"} { |
| 	skip "This test requires AccountingStorageType=accounting_storage/slurmdbd (sacct is used to verify allocations)"
| } |
| |
| set node_name [get_nodes_by_request "--gres=gpu:2 -n1 -t1"] |
| if {[llength $node_name] != 1} {
| 	skip "This test needs to be able to submit jobs with at least --gres=gpu:2"
| } |
| if {![param_contains [get_config_param "AccountingStorageTRES"] "gres/gpu"]} { |
| skip "This test requires AccountingStorageTRES=gres/gpu" |
| } |
| |
| # Get the total number of GPUs on the test node
| set gres_node [get_node_param $node_name "Gres"] |
| set gpu_tot [dict get [count_gres $gres_node] "gpu"] |
| |
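| # Request a single task with --exclusive and verify that the whole node's CPUs are allocated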
| spawn $salloc -t1 -n1 --exclusive $srun -l $bin_printenv SLURMD_NODENAME |
| expect { |
| -re "Granted job allocation ($number)" { |
| set job_id $expect_out(1,string) |
| exp_continue |
| } |
| -re "($number): ($re_word_str)" { |
| set nodes $expect_out(2,string) |
| exp_continue |
| } |
| timeout { |
| fail "salloc is not responding" |
| } |
| eof { |
| wait |
| } |
| } |
| |
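| # Wait for the job to complete so the CPU allocation can be verified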
| wait_for_job -fail $job_id DONE |
| check_alloc |
| |
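| # Repeat with an explicit --mem request to verify --exclusive still allocates all CPUs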
| spawn $salloc -t1 -n1 --mem=100 --exclusive $srun -l $bin_printenv SLURMD_NODENAME |
| expect { |
| -re "Granted job allocation ($number)" { |
| set job_id $expect_out(1,string) |
| exp_continue |
| } |
| -re "($number): ($re_word_str)" { |
| set nodes $expect_out(2,string) |
| exp_continue |
| } |
| timeout { |
| fail "salloc is not responding" |
| } |
| eof { |
| wait |
| } |
| } |
| |
| wait_for_job -fail $job_id DONE |
| check_alloc |
| |
| # |
| # Verify that all GPUs and other GRES are allocated with the --exclusive flag |
| # |
| set job_id 0 |
| |
| spawn $salloc -t1 -n1 -w $node_name --gres=gpu --exclusive $srun -l $bin_printenv SLURMD_NODENAME |
| expect { |
| -re "Granted job allocation ($number)" { |
| set job_id $expect_out(1,string) |
| exp_continue |
| } |
| timeout { |
| 		fail "salloc is not responding"
| } |
| eof { |
| wait |
| } |
| } |
| |
| # Check that all GRES on the node were allocated to the job
| check_exclusive_gres $job_id $node_name |