#!/usr/bin/env expect
############################################################################
# Purpose: Test of Slurm functionality
#          Confirm that node sharing flags are respected (--nodelist and
#          --oversubscribe options).
############################################################################
# Copyright (C) 2002-2006 The Regents of the University of California.
# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
# Written by Morris Jette <jette1@llnl.gov>
# CODE-OCEC-09-009. All rights reserved.
#
# This file is part of Slurm, a resource management program.
# For details, see <https://slurm.schedmd.com/>.
# Please also read the included file: DISCLAIMER.
#
# Slurm is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along
# with Slurm; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
############################################################################
source ./globals

set file_err      "$test_dir/error"
set file_in       "$test_dir/input"
set file_out      "$test_dir/output"
set job_id1       0
set job_id2       0
set nodelist_name ""
set gpu_tot       0
set job_tres_cnt  0

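# Note: the cleanup proc below is expected to be invoked automatically by the
# test framework when the test ends (pass, fail, or skip), so both jobs are
# cancelled regardless of how the test exits.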
proc cleanup {} {
	global job_id1 job_id2

	cancel_job [list $job_id1 $job_id2]
}

set node_name [get_nodes_by_request "--gres=gpu:2 -n1 -t1"]
if {[llength $node_name] != 1} {
	skip "This test requires being able to submit a job with at least --gres=gpu:2"
}
if {![param_contains [get_config_param "AccountingStorageTRES"] "gres/gpu"]} {
	skip "This test requires AccountingStorageTRES=gres/gpu"
}

# Get the total number of GPUs in the test node
set gres_node [get_node_param $node_name "Gres"]
set gpu_tot   [dict get [count_gres $gres_node] "gpu"]
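# count_gres (a helper from globals) parses the node's Gres string (e.g.
# "gpu:2" or "gpu:tesla:2") into a dict of per-GRES counts; gpu_tot records
# how many GPUs the selected node has in total.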

#
# Run a single-task job via srun and parse its verbose output to learn
# which node it ran on
#
set output [run_command_output -fail "$srun -v -N1 -l -t1 $bin_printenv SLURMD_NODENAME"]
if {![regexp "on host ($re_word_str)," $output - nodelist_name]} {
	fail "Did not get hostname of task 0"
}

#
# Build input script file
#
make_bash_script $file_in "$srun $bin_sleep 5"
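# The generated batch script simply runs a 5-second job step, long enough for
# the exclusive allocation to still be active when the second (oversubscribing)
# srun below is submitted.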

#
# Submit two jobs to the same node, one with no sharing, the other
# with sharing permitted. Ensure the first job completes before the
# second job is started.
#
set job_id1 [submit_job -fail "-N1 --exclusive --nodelist=$nodelist_name -t1 --output=$file_out --error=$file_err $file_in"]

set partition "dummy"
set waited 1
set timeout [expr {$max_job_delay + 5}]
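# Probe sharing behavior: ask for the same node with --oversubscribe and have
# the step run "scontrol -o show job" on the exclusive job. Because job 1 was
# submitted with --exclusive, this srun should not start until job 1 finishes;
# if its output still shows job 1 in a RUNNING state, the node was shared.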
set output [run_command_output -fail "$srun -N1 --nodelist=$nodelist_name -t1 --oversubscribe $scontrol -o show job $job_id1"]
if {[regexp "JobState=RUN" $output]} {
	set waited 0
}
regexp "Partition=($re_word_str)" $output - partition

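# Partitions configured with OverSubscribe=FORCE always share nodes,
# overriding the job's --exclusive request, so overlapping execution is
# expected there and the check is skipped rather than failed.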
if {$waited == 0} {
	if {[param_contains [get_partition_param $partition OverSubscribe] "FORCE"]} {
		subskip "Test incompatible with OverSubscribe=FORCE"
		set waited 1
	}
}
if {$waited == 0} {
	fail "srun failed to wait for non-sharing job to complete"
}

cancel_job $job_id1

#
# Verify that all GPUs are allocated with the --exclusive flag
#
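# With --exclusive the job is allocated the whole node, so Slurm is expected
# to assign every GPU on the node even though only --gres=gpu (one GPU per
# node) is requested explicitly.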
set job_id2 [submit_job -fail "-t1 -N1 -w $node_name --gres=gpu --exclusive --output=$file_out $file_in"]
wait_for_job -fail $job_id2 "RUNNING"

# Check that all GRES of the node were allocated to the job
check_exclusive_gres $job_id2 $node_name
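# check_exclusive_gres is a testsuite helper (defined in globals); it is
# expected to compare the job's allocated gres/gpu TRES count against the
# node's total GPU count obtained above.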