# Invocation command line:
# /home/amd/hpc-oneapi-kit/HPC2021/bin/harness/runhpc --flagsurl=/home/amd/hpc-oneapi-kit/HPC2021/Intel_compiler_flags_hpc.2024.xml --config=mpi-oneapi.cfg --tune=all --size=ref --define model=mpi --pmodel=MPI --rank=384 --threads=1 --reportable --iterations=3 small
# output_root was not used for this run
############################################################################
#!/bin/bash
#######################################################################
allow_label_override = no	# Do not build with default settings if the label argument is not matched
build_in_build_dir=0		# 0 = build in the run directory, not a separate build directory

strict_rundir_verify = yes	# Verify that file contents match expected checksums. This is forced on when --reportable is set

%ifndef %{label}         	# If label is not set, default to oneapi
%   define label oneapi
%endif

%ifndef %{model}         	# If model is not set, default to mpi
%   define model mpi
pmodel = MPI
%endif

#teeout          = yes
makeflags       = -j 64		# Build with up to 64 parallel make jobs

######################################################################
# The header section of the config file.  Must appear
# before any instances of "section markers" (see below)
#
# ext = how the binaries you generated will be identified
# tune = specify "base" or "peak" or "all"

label         = %{label}_%{model}
tune          = base
output_format = all

use_submit_for_speed = 1

default:
# Archiver and compiler definitions common to all benchmarks.
AR           = ar
ARFLAGS      = cr
# Intel MPI compiler wrappers, redirected to the oneAPI LLVM-based
# compilers (ifx / icx / icpx) instead of the classic compilers.
FC      = mpiifort      -fc=ifx
CC      = mpiicc        -cc=icx
CXX     = mpiicpc       -cxx=icpx

# Options the harness uses to record compiler versions in the report.
CC_VERSION_OPTION  = --version
CXX_VERSION_OPTION = --version
FC_VERSION_OPTION  = --version

#######################################################################
default=base=default:
# Optimization flags shared by all base builds (all languages).
# NOTE(review): -march/-mtune=skylake-avx512 on an AMD EPYC host — icx has no
# AMD-specific target and this enables AVX-512 code generation; confirm this
# is the intended target setting for this platform.
OPTIMIZE        = -Ofast -ipo -mprefer-vector-width=512 -march=skylake-avx512 -mtune=skylake-avx512 -ansi-alias
COPTIMIZE       =
CXXOPTIMIZE     =
# Fortran-only: Intel allocation semantics and 64-byte array alignment.
FOPTIMIZE       = -nostandard-realloc-lhs -align array64byte
CPORTABILITY	= -DSPEC_LP64
CXXPORTABILITY	= -DSPEC_LP64
LIBS            = -limf -lm

pmodel = MPI

# Alternate Open MPI launch command, kept for reference:
#submit = mpirun --bind-to core:overload-allowed --oversubscribe --mca topo basic -np $ranks $command
# Launch through Intel MPI's hydra; -ppn $ranks places every rank on one node
# (single-node run), with OMP_NUM_THREADS exported to each rank.
submit = mpiexec.hydra -bootstrap ssh -genv OMP_NUM_THREADS $threads -np $ranks -ppn $ranks $command

# Per-benchmark portability workarounds.
513.soma_t:
PORTABILITY += -DSPEC_NO_VAR_ARRAY_REDUCE

# pot3d: icx rejects the mismatched function-pointer casts by default;
# downgrade the diagnostic so the benchmark builds.
528.pot3d_t:
CPORTABILITY += -Wno-incompatible-function-pointer-types

628.pot3d_s:
CPORTABILITY += -Wno-incompatible-function-pointer-types

# basepeak=1: no separate peak-tuned build for these benchmarks; the base
# result is also reported as the peak result.
505.lbm_t=peak:
basepeak=1

513.soma_t=peak:
basepeak=1

518.tealeaf_t=peak:
basepeak=1

519.clvleaf_t=peak:
basepeak=1

521.miniswp_t=peak:
basepeak=1

528.pot3d_t=peak:
basepeak=1

532.sph_exa_t=peak:
basepeak=1

534.hpgmgfv_t=peak:
basepeak=1

535.weather_t=peak:
basepeak=1

605.lbm_s=peak:
basepeak=1

613.soma_s=peak:
basepeak=1

618.tealeaf_s=peak:
basepeak=1

621.miniswp_s=peak:
basepeak=1

634.hpgmgfv_s=peak:
basepeak=1

632.sph_exa_s=peak:
basepeak=1

619.clvleaf_s=peak:
basepeak=1

628.pot3d_s=peak:
basepeak=1

635.weather_s=peak:
basepeak=1



# The following section was added automatically, and contains settings that
# did not appear in the original configuration file, but were added to the
# raw file after the run.
default:
flagsurl000 = http://www.spec.org/hpc2021/flags/Intel_compiler_flags_hpc.2024.2024-10-10.xml
sw_other = None
system_class = Homogenous
sw_compiler = Intel oneAPI DPC++/C++ Compiler 2024.2.1
sw_mpi_library = Intel MPI Version 2021.13
sw_mpi_other = None
test_sponsor = Supermicro
license_num = 6569
showtimer = 0
tester = Supermicro
hw_avail = Oct-2024
sw_avail = Apr-2024
prepared_by = Supermicro
system_vendor = Supermicro
system_name = Hyper A+ Server AS -2126HS-TN (AMD EPYC 9965)
sw_os_list = Ubuntu 24.04 LTS Kernel 6.8.0-44-generic
hw_vendor_list = Supermicro
hw_total_accel = 0
hw_model_list = Hyper A+ Server AS -2126HS-TN
hw_cpu_name_list = AMD EPYC 9965
hw_accel_vendor_list = None
hw_accel_model_list = None
node_compute_syslbl = Hyper A+ Server AS -2126HS-TN
node_compute_sw_state = Multi-user, run level 3
node_compute_sw_sharedfile = None
node_compute_sw_other = None
node_compute_sw_localfile = ext4
node_compute_purpose = compute
node_compute_order = 1
node_compute_hw_vendor = Supermicro
node_compute_hw_scache = 1 MB I+D on chip per core
node_compute_hw_pcache = 32 KB I + 48 KB D on chip per core
node_compute_hw_other = None
node_compute_hw_ocache = None
node_compute_hw_nthreadspercore = 1
node_compute_hw_ncpuorder = 1,2 chips
node_compute_hw_ncoresperchip = 192
node_compute_hw_ncores = 384
node_compute_hw_nchips = 2
node_compute_hw_model = Hyper A+ Server AS -2126HS-TN
node_compute_hw_disk = 1 x 3.5 TB NVMe SSD
node_compute_hw_cpu_name = AMD EPYC 9965
node_compute_hw_cpu_mhz = 2250
node_compute_hw_cpu_char = Max. Boost Clock up to 3.7GHz
node_compute_hw_adapter_fs_slot_type = None
node_compute_hw_adapter_fs_ports_used = 0
node_compute_hw_adapter_fs_model = None
node_compute_hw_adapter_fs_interconnect = None
node_compute_hw_adapter_fs_firmware = None
node_compute_hw_adapter_fs_driver = None
node_compute_hw_adapter_fs_data_rate = None
node_compute_hw_adapter_fs_count = 0
node_compute_hw_accel_vendor = None
node_compute_hw_accel_type = None
node_compute_hw_accel_model = None
node_compute_hw_accel_ecc = None
node_compute_hw_accel_desc = None
node_compute_hw_accel_count = None
node_compute_hw_accel_connect = None
node_compute_count = 1
node_forgotten_hw_accel_vendor = None
node_forgotten_hw_accel_type = None
node_forgotten_hw_accel_model = None
node_forgotten_hw_accel_ecc = None
node_forgotten_hw_accel_desc = None
node_forgotten_hw_accel_count = None
node_forgotten_hw_accel_connect = None
node_forgotten_syslbl = Supermicro Hyper A+ Server AS -2126HS-TN (H14DSH , AMD EPYC 9965)
node_forgotten_sw_state = Multi-user, run level 3
node_forgotten_sw_sharedfile = NFS share
node_forgotten_sw_other = None
node_forgotten_sw_localfile = ext4
node_forgotten_purpose = compute
node_forgotten_order = 1
node_forgotten_hw_vendor = Supermicro
node_forgotten_hw_scache = 1 MB I+D on chip per core
node_forgotten_hw_pcache = 32 KB I + 48 KB D on chip per core
node_forgotten_hw_other = H14DSH
node_forgotten_hw_ocache = None
node_forgotten_hw_nthreadspercore = 1
node_forgotten_hw_ncpuorder = 1-2 chips
node_forgotten_hw_ncoresperchip = 192
node_forgotten_hw_ncores = 384
node_forgotten_hw_nchips = 2
node_forgotten_hw_disk = 1 x 3.5 TB NVMe
node_forgotten_hw_cpu_name = AMD EPYC 9965
node_forgotten_hw_cpu_mhz = 2250
node_forgotten_hw_cpu_char = Max. Boost Clock up to 3.7GHz
node_forgotten_hw_adapter_fs_slot_type = None
node_forgotten_hw_adapter_fs_ports_used = 0
node_forgotten_hw_adapter_fs_interconnect = None
node_forgotten_hw_adapter_fs_firmware = None
node_forgotten_hw_adapter_fs_driver = None
node_forgotten_hw_adapter_fs_data_rate = None
node_forgotten_hw_adapter_fs_count = 0
node_forgotten_count = 1
node_forgotten_hw_adapter_fs_model000 = Mellanox Technologies MT2910 Family [ConnectX-7]
node_forgotten_hw_adapter_fs_model001 = MCX755106AS-HEAT
node_forgotten_hw_memory000 = 1536 GB (24 x 64 GB 2Rx4 PC5-6400B-R)
node_forgotten_hw_memory001 = running at 6000
node_forgotten_hw_model000 = Hyper A+ Server AS -2126HS-TN
node_forgotten_hw_model001 = (H14DSH , AMD EPYC 9965)
node_forgotten_hw_tcache000 = 384 MB I+D on chip per chip,
node_forgotten_hw_tcache001 = 32 MB shared / 16 cores
node_forgotten_sw_os000 = Ubuntu 24.04 LTS
node_forgotten_sw_os001 = Kernel 6.8.0-44-generic
notes_000 = MPI startup command:
notes_005 =   mpiexec.hydra command was used to start MPI jobs.
node_compute_hw_memory000 = 1536 GB (24 x 64 GB 2Rx4 PC5-6400B-R,
node_compute_hw_memory001 = running at 6000)
node_compute_hw_tcache000 = 384 MB I+D on chip per chip,
node_compute_hw_tcache001 = 32 MB shared / 16 cores
node_compute_sw_os000 = Ubuntu 24.04 LTS
node_compute_sw_os001 = Kernel 6.8.0-44-generic
notes_submit_000 = mpiexec.hydra -bootstrap ssh -genv OMP_NUM_THREADS $threads -np $ranks -ppn $ranks $command