Add 25.11 protocol block for _[un]pack_job_info_members() and the
corresponding job-details pack code in src/slurmctld/job_mgr.c

Ticket: 23220
diff --git a/src/common/slurm_protocol_pack.c b/src/common/slurm_protocol_pack.c
index a4a47c9..164431c 100644
--- a/src/common/slurm_protocol_pack.c
+++ b/src/common/slurm_protocol_pack.c
@@ -3317,7 +3317,213 @@
 	uint32_t uint32_tmp;
 	bool need_unpack = false;
 
-	if (protocol_version >= SLURM_25_05_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_25_11_PROTOCOL_VERSION) {
+		/* job_record_pack_common */
+		safe_unpackstr(&job->account, buffer);
+		safe_unpackstr(&job->admin_comment, buffer);
+		safe_unpackstr(&job->alloc_node, buffer);
+		safe_unpack32(&job->alloc_sid, buffer);
+		safe_unpack32(&job->array_job_id, buffer);
+		safe_unpack32(&job->array_task_id, buffer);
+		safe_unpack32(&job->assoc_id, buffer);
+
+		safe_unpackstr(&job->batch_features, buffer);
+		safe_unpack16(&job->batch_flag, buffer);
+		safe_unpackstr(&job->batch_host, buffer);
+		safe_unpack64(&job->bitflags, buffer);
+		safe_unpackstr(&job->burst_buffer, buffer);
+		safe_unpackstr(&job->burst_buffer_state, buffer);
+		safe_unpackdouble(&job->billable_tres, buffer);
+
+		safe_unpackstr(&job->comment, buffer);
+		safe_unpackstr(&job->container, buffer);
+		safe_unpackstr(&job->container_id, buffer);
+		safe_unpackstr(&job->cpus_per_tres, buffer);
+
+		safe_unpack_time(&job->deadline, buffer);
+		safe_unpack32(&job->delay_boot, buffer);
+		safe_unpack32(&job->derived_ec, buffer);
+
+		safe_unpack32(&job->exit_code, buffer);
+		safe_unpackstr(&job->extra, buffer);
+
+		safe_unpackstr(&job->failed_node, buffer);
+		/* job_record_pack_fed_details */
+		safe_unpackbool(&need_unpack, buffer);
+		if (need_unpack) {
+			safe_unpackstr(&job->fed_origin_str, buffer);
+			safe_unpack64(&job->fed_siblings_active, buffer);
+			safe_unpackstr(&job->fed_siblings_active_str, buffer);
+			safe_unpack64(&job->fed_siblings_viable, buffer);
+			safe_unpackstr(&job->fed_siblings_viable_str, buffer);
+		}
+		/*******************************/
+
+		safe_unpackstr(&job->gres_total, buffer);
+		safe_unpack32(&job->group_id, buffer);
+
+		safe_unpack32(&job->het_job_id, buffer);
+		safe_unpackstr(&job->het_job_id_set, buffer);
+		safe_unpack32(&job->het_job_offset, buffer);
+
+		safe_unpack32(&job->job_id, buffer);
+		safe_unpack32(&job->job_state, buffer);
+
+		safe_unpack_time(&job->last_sched_eval, buffer);
+		safe_unpackstr(&job->licenses, buffer);
+		safe_unpackstr(&job->licenses_allocated, buffer);
+
+		safe_unpack16(&job->mail_type, buffer);
+		safe_unpackstr(&job->mail_user, buffer);
+		safe_unpackstr(&job->mcs_label, buffer);
+		safe_unpackstr(&job->mem_per_tres, buffer);
+
+		safe_unpackstr(&job->name, buffer);
+		safe_unpackstr(&job->network, buffer);
+
+		safe_unpack_time(&job->preempt_time, buffer);
+		safe_unpack_time(&job->pre_sus_time, buffer);
+		safe_unpack32(&job->priority, buffer);
+		safe_unpack32(&job->profile, buffer);
+
+		safe_unpack8(&job->reboot, buffer);
+		safe_unpack32(&job->req_switch, buffer);
+		safe_unpack_time(&job->resize_time, buffer);
+		safe_unpack16(&job->restart_cnt, buffer);
+		safe_unpackstr(&job->resv_name, buffer);
+		safe_unpackstr(&job->resv_ports, buffer);
+
+		safe_unpackstr(&job->selinux_context, buffer);
+		safe_unpack32(&job->site_factor, buffer);
+		safe_unpack16(&job->start_protocol_ver, buffer);
+		safe_unpackstr(&job->state_desc, buffer);
+		safe_unpack32(&job->state_reason, buffer);
+		safe_unpack_time(&job->suspend_time, buffer);
+		safe_unpackstr(&job->system_comment, buffer);
+
+		safe_unpack32(&job->time_min, buffer);
+		safe_unpackstr(&job->tres_bind, buffer);
+		safe_unpackstr(&job->tres_alloc_str, buffer);
+		safe_unpackstr(&job->tres_req_str, buffer);
+		safe_unpackstr(&job->tres_freq, buffer);
+		safe_unpackstr(&job->tres_per_job, buffer);
+		safe_unpackstr(&job->tres_per_node, buffer);
+		safe_unpackstr(&job->tres_per_socket, buffer);
+		safe_unpackstr(&job->tres_per_task, buffer);
+
+		safe_unpack32(&job->user_id, buffer);
+		safe_unpackstr(&job->user_name, buffer);
+
+		safe_unpack32(&job->wait4switch, buffer);
+		safe_unpackstr(&job->wckey, buffer);
+		/**************************************/
+
+
+		/* The array_task_str value is stored in slurmctld and passed
+		 * here in hex format for best scalability. Its format needs
+		 * to be converted to human readable form by the client. */
+		safe_unpackstr(&job->array_task_str, buffer);
+		safe_unpack32(&job->array_max_tasks, buffer);
+		xlate_array_task_str(&job->array_task_str, job->array_max_tasks,
+				     &job->array_bitmap);
+
+		safe_unpack32(&job->time_limit, buffer);
+
+		safe_unpack_time(&job->start_time, buffer);
+		safe_unpack_time(&job->end_time, buffer);
+		safe_unpack32_array(&job->priority_array, &uint32_tmp, buffer);
+		safe_unpackstr(&job->priority_array_names, buffer);
+		safe_unpackstr(&job->cluster, buffer);
+		safe_unpackstr(&job->nodes, buffer);
+		safe_unpackstr(&job->sched_nodes, buffer);
+		safe_unpackstr(&job->partition, buffer);
+		safe_unpackstr(&job->qos, buffer);
+		safe_unpack_time(&job->preemptable_time, buffer);
+
+		if (unpack_job_resources(&job->job_resrcs, buffer,
+					 protocol_version))
+			goto unpack_error;
+		safe_unpackstr_array(&job->gres_detail_str,
+				     &job->gres_detail_cnt, buffer);
+
+		unpack_bit_str_hex_as_inx(&job->node_inx, buffer);
+
+		/*** unpack default job details ***/
+		safe_unpackbool(&need_unpack, buffer);
+		if (!need_unpack) {
+			safe_unpack32(&job->num_cpus, buffer);
+			safe_unpack32(&job->num_nodes, buffer);
+			safe_unpack32(&job->nice, buffer);
+		} else {
+			/* job_record_pack_details_common */
+			safe_unpack_time(&job->accrue_time, buffer);
+			safe_unpack_time(&job->eligible_time, buffer);
+			safe_unpackstr(&job->cluster_features, buffer);
+			safe_unpack32(&job->cpu_freq_gov, buffer);
+			safe_unpack32(&job->cpu_freq_max, buffer);
+			safe_unpack32(&job->cpu_freq_min, buffer);
+			safe_unpackstr(&job->dependency, buffer);
+			unpack_bit_str_hex_as_fmt_str(&job->job_size_str,
+						      buffer);
+			safe_unpack32(&job->nice, buffer);
+			safe_unpack16(&job->ntasks_per_node, buffer);
+			safe_unpack16(&job->ntasks_per_tres, buffer);
+			safe_unpack16(&job->requeue, buffer);
+			safe_unpack16(&job->segment_size, buffer);
+			safe_unpack_time(&job->submit_time, buffer);
+			safe_unpackstr(&job->work_dir, buffer);
+			/**********************************/
+
+			safe_unpackstr(&job->features, buffer);
+			safe_unpackstr(&job->prefer, buffer);
+			safe_unpackstr(&job->command, buffer);
+
+			safe_unpack32(&job->num_cpus, buffer);
+			safe_unpack32(&job->max_cpus, buffer);
+			safe_unpack32(&job->num_nodes, buffer);
+			safe_unpack32(&job->max_nodes, buffer);
+			safe_unpack32(&job->num_tasks, buffer);
+
+			safe_unpack16(&job->shared, buffer);
+
+			safe_unpackstr(&job->cronspec, buffer);
+		}
+
+		/*** unpack pending job details ***/
+		safe_unpack16(&job->contiguous, buffer);
+		safe_unpack16(&job->core_spec, buffer);
+		safe_unpack16(&job->cpus_per_task, buffer);
+		safe_unpack16(&job->pn_min_cpus, buffer);
+
+		safe_unpack64(&job->pn_min_memory, buffer);
+		safe_unpack32(&job->pn_min_tmp_disk, buffer);
+		safe_unpack16(&job->oom_kill_step, buffer);
+		safe_unpackstr(&job->req_nodes, buffer);
+
+		unpack_bit_str_hex_as_inx(&job->req_node_inx, buffer);
+
+		safe_unpackstr(&job->exc_nodes, buffer);
+
+		unpack_bit_str_hex_as_inx(&job->exc_node_inx, buffer);
+
+		safe_unpackstr(&job->std_err, buffer);
+		safe_unpackstr(&job->std_in, buffer);
+		safe_unpackstr(&job->std_out, buffer);
+
+		if (unpack_multi_core_data(&mc_ptr, buffer, protocol_version))
+			goto unpack_error;
+		if (mc_ptr) {
+			job->boards_per_node = mc_ptr->boards_per_node;
+			job->sockets_per_board = mc_ptr->sockets_per_board;
+			job->sockets_per_node = mc_ptr->sockets_per_node;
+			job->cores_per_socket = mc_ptr->cores_per_socket;
+			job->threads_per_core = mc_ptr->threads_per_core;
+			job->ntasks_per_board = mc_ptr->ntasks_per_board;
+			job->ntasks_per_socket = mc_ptr->ntasks_per_socket;
+			job->ntasks_per_core = mc_ptr->ntasks_per_core;
+			xfree(mc_ptr);
+		}
+	} else if (protocol_version >= SLURM_25_05_PROTOCOL_VERSION) {
 		/* job_record_pack_common */
 		safe_unpackstr(&job->account, buffer);
 		safe_unpackstr(&job->admin_comment, buffer);
diff --git a/src/slurmctld/job_mgr.c b/src/slurmctld/job_mgr.c
index c7675e8..5204720 100644
--- a/src/slurmctld/job_mgr.c
+++ b/src/slurmctld/job_mgr.c
@@ -10946,7 +10946,140 @@
 	} else
 		_find_node_config(&max_cpu_cnt, &max_core_cnt);
 
-	if (protocol_version >= SLURM_24_11_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_25_11_PROTOCOL_VERSION) {
+		if (!detail_ptr) {
+			packbool(false, buffer);
+
+			if (job_ptr->total_cpus)
+				pack32(job_ptr->total_cpus, buffer);
+			else
+				pack32(job_ptr->cpu_cnt, buffer);
+
+			pack32(job_ptr->node_cnt, buffer);
+			pack32(NICE_OFFSET, buffer); /* Best guess */
+			return;
+		}
+		packbool(true, buffer);
+		job_record_pack_details_common(detail_ptr, buffer,
+					       protocol_version);
+
+		if (!IS_JOB_PENDING(job_ptr)) {
+			packstr(detail_ptr->features_use, buffer);
+			packnull(buffer);
+		} else {
+			packstr(detail_ptr->features, buffer);
+			packstr(detail_ptr->prefer, buffer);
+		}
+
+		if (detail_ptr->argv)
+			packstr(detail_ptr->argv[0], buffer);
+		else
+			packnull(buffer);
+
+		if (IS_JOB_COMPLETING(job_ptr) && job_ptr->cpu_cnt) {
+			pack32(job_ptr->cpu_cnt, buffer);
+			pack32((uint32_t) 0, buffer);
+		} else if (job_ptr->total_cpus &&
+			   !IS_JOB_PENDING(job_ptr)) {
+			/* If job is PENDING ignore total_cpus,
+			 * which may have been set by previous run
+			 * followed by job requeue. */
+			pack32(job_ptr->total_cpus, buffer);
+			pack32((uint32_t) 0, buffer);
+		} else {
+			pack32(detail_ptr->min_cpus, buffer);
+			if (detail_ptr->max_cpus != NO_VAL)
+				pack32(detail_ptr->max_cpus, buffer);
+			else
+				pack32((uint32_t) 0, buffer);
+		}
+
+		if (IS_JOB_COMPLETING(job_ptr) && job_ptr->node_cnt) {
+			pack32(job_ptr->node_cnt, buffer);
+			pack32((uint32_t) 0, buffer);
+		} else if (job_ptr->total_nodes) {
+			pack32(job_ptr->total_nodes, buffer);
+			pack32((uint32_t) 0, buffer);
+		} else if (job_ptr->node_cnt_wag) {
+			/* This should catch everything else, but
+			 * just in case this is 0 (startup or
+			 * whatever) we will keep the rest of
+			 * this if statement around.
+			 */
+			pack32(job_ptr->node_cnt_wag, buffer);
+			pack32((uint32_t) detail_ptr->max_nodes,
+			       buffer);
+		} else if (detail_ptr->ntasks_per_node) {
+			/* min_nodes based upon task count and ntasks
+			 * per node */
+			uint32_t min_nodes;
+			min_nodes = detail_ptr->num_tasks /
+				detail_ptr->ntasks_per_node;
+			min_nodes = MAX(min_nodes,
+					detail_ptr->min_nodes);
+			pack32(min_nodes, buffer);
+			pack32(detail_ptr->max_nodes, buffer);
+		} else if (detail_ptr->cpus_per_task > 1) {
+			/* min_nodes based upon task count and cpus
+			 * per task */
+			uint32_t ntasks_per_node, min_nodes;
+			ntasks_per_node = max_cpu_cnt /
+				detail_ptr->cpus_per_task;
+			ntasks_per_node = MAX(ntasks_per_node, 1);
+			min_nodes = detail_ptr->num_tasks /
+				ntasks_per_node;
+			min_nodes = MAX(min_nodes,
+					detail_ptr->min_nodes);
+			pack32(min_nodes, buffer);
+			pack32(detail_ptr->max_nodes, buffer);
+		} else if (detail_ptr->mc_ptr &&
+			   detail_ptr->mc_ptr->ntasks_per_core &&
+			   (detail_ptr->mc_ptr->ntasks_per_core
+			    != INFINITE16)) {
+			/* min_nodes based upon task count and ntasks
+			 * per core */
+			uint32_t min_cores, min_nodes;
+			min_cores = ROUNDUP(detail_ptr->num_tasks,
+					    detail_ptr->mc_ptr->
+					    ntasks_per_core);
+			min_nodes = ROUNDUP(min_cores, max_core_cnt);
+			min_nodes = MAX(min_nodes,
+					detail_ptr->min_nodes);
+			pack32(min_nodes, buffer);
+			pack32(detail_ptr->max_nodes, buffer);
+		} else {
+			/* min_nodes based upon task count only */
+			uint32_t min_nodes;
+			uint32_t max_nodes;
+
+			min_nodes = ROUNDUP(detail_ptr->num_tasks,
+					    max_cpu_cnt);
+			min_nodes = MAX(min_nodes,
+					detail_ptr->min_nodes);
+			max_nodes = MAX(min_nodes,
+					detail_ptr->max_nodes);
+			pack32(min_nodes, buffer);
+			pack32(max_nodes, buffer);
+		}
+		if (detail_ptr->num_tasks)
+			pack32(detail_ptr->num_tasks, buffer);
+		else if (IS_JOB_PENDING(job_ptr))
+			pack32(detail_ptr->min_nodes, buffer);
+		else if (job_ptr->tres_alloc_cnt)
+			pack32((uint32_t)
+			       job_ptr->tres_alloc_cnt[TRES_ARRAY_NODE],
+			       buffer);
+		else
+			pack32(NO_VAL, buffer);
+
+		pack16(shared, buffer);
+
+		if (detail_ptr->crontab_entry)
+			packstr(detail_ptr->crontab_entry->cronspec,
+				buffer);
+		else
+			packnull(buffer);
+	} else if (protocol_version >= SLURM_24_11_PROTOCOL_VERSION) {
 		if (!detail_ptr) {
 			packbool(false, buffer);