# Invocation command line:
# /gbc-lustre/mackey/specMPI/bin/runspec --ranks 160 --reportable -a validate -o asc,csv,html -n 3 -T base -c default.cfg --define MPI=MPT --define MPI_NICE=HPE MPT 2.17-beta --define ICCV=18.0.0.128 --define PPN=40 --define HW_NODES=4 --define NCHIPS=2 --define CORESPERCHIP=20 --define CORESPERNODE=40 --define HT=ON --define THP=always --define ICC_BUILD=20170811 --define KERNEL=3.10.0-514.2.2.el7.x86_64 --define OS_VER=Red Hat Enterprise Linux Server 7.3 (Maipo) --define CPU_NICE=Intel Xeon Gold 6148 CPU --define SGI_COMPUTE_BUILD=716r171.rhel73-1705051353 --define SGI_COMPUTE_REL=3.5.0 --define CPU_GHZ=2.40 --define CPU_MHZ=2401 --define HW_SWITCHES=1 --define AVX512=1 -i mref --define SIZE=medium medium
# output_root was not used for this run
############################################################################
%if !defined(%{ICCV}) || !defined(%{SIZE})
% error must define both SIZE and ICCV
%endif
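# The guard above aborts any run that omits either define. A minimal
# runspec invocation that satisfies it would look like the following
# (illustrative only; all other options omitted):
#
#   runspec -c default.cfg --define ICCV=18.0.0.128 --define SIZE=medium ...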
####################################################################
#
# Platform Description
#
####################################################################
#include: platform.inc
# ----- Begin inclusion of 'platform.inc'
############################################################################
####################################################################
# Adjustments from runtime environment
####################################################################
%if '%{HT}' eq 'ON'
% define THREADS_PER_CORE 2
% define HYPERTHREADING enabled
%else
% define THREADS_PER_CORE 1
% define HYPERTHREADING disabled
%endif
####################################################################
#
# Platform Description
#
####################################################################
hw_avail = Jul-2017
license_num = 1
prepared_by = HPC Performance Engineering
sw_avail = Nov-2017
sw_base_ptrsize = 64-bit
sw_other = None
sw_peak_ptrsize = Not Applicable
system_vendor = Hewlett Packard Enterprise
node_fileserver_sw_sharedfile = LFS
interconnect_IOMPI_hw_switch_1_model = SGI P0002145
interconnect_IOMPI_hw_topo = Enhanced Hypercube
interconnect_IOMPI_order = 1
interconnect_IOMPI_purpose = MPI and I/O traffic
interconnect_IOMPI_label = InfiniBand (MPI and I/O)
interconnect_IOMPI_hw_vendor = Mellanox Technologies and SGI
interconnect_IOMPI_hw_switch_1_ports = 36
interconnect_IOMPI_hw_switch_1_firmware = 11.0350.0394
interconnect_IOMPI_hw_switch_1_data_rate = InfiniBand 4X EDR
interconnect_IOMPI_hw_switch_1_count = 1
interconnect_IOMPI_hw_model = SGI P0002145
test_sponsor = HPE
tester = HPE
system_class = Homogeneous
sw_preprocessors = None
sw_mpi_other = OFED 3.2.2
sw_mpi_library000 = HPE Performance Software - Message Passing
sw_mpi_library001 = Interface 2.17
node_compute_count = 4
node_compute_hw_adapter_IB_count = 2
node_compute_hw_adapter_IB_data_rate = InfiniBand 4X EDR
node_compute_hw_adapter_IB_driver = OFED-3.4-2.1.8.0
node_compute_hw_adapter_IB_firmware = 12.18.1000
node_compute_hw_adapter_IB_interconnect = InfiniBand
node_compute_hw_adapter_IB_model = Mellanox MT27700 with ConnectX-4 ASIC
node_compute_hw_adapter_IB_ports_used = 1
node_compute_hw_adapter_IB_slot_type = PCIe x16 Gen3 8GT/s
node_compute_hw_cpu_char001 = Intel Turbo Boost Technology up to 3.70 GHz
node_compute_hw_cpu_mhz = 2400
node_compute_hw_cpu_name = Intel Xeon Gold 6148
node_compute_hw_disk = None
node_compute_hw_memory = 192 GB (12 x 16 GB 2Rx4 PC4-2666V-R)
node_compute_hw_model = SGI 8600 (Intel Xeon Gold 6148, 2.40 GHz)
node_compute_hw_nchips = 2
node_compute_hw_ncores = 40
node_compute_hw_ncoresperchip = 20
node_compute_hw_ncpuorder = 1-2 chips
node_compute_hw_nthreadspercore = 2
node_compute_hw_ocache = None
node_compute_hw_other = None
node_compute_hw_pcache = 32 KB I + 32 KB D on chip per core
node_compute_hw_scache = 1 MB I+D on chip per core
node_compute_hw_tcache = 27.5 MB I+D on chip per chip
node_compute_hw_vendor = Hewlett Packard Enterprise
node_compute_label = HPE XA730i Gen10 Server Node
node_compute_order = 2
node_compute_purpose = compute
node_compute_sw_localfile = LFS
node_compute_sw_os000 = Red Hat Enterprise Linux Server 7.3 (Maipo),
node_compute_sw_os001 = Kernel 3.10.0-514.2.2.el7.x86_64
node_compute_sw_other000 = SGI Management Center Compute Node 3.5.0,
node_compute_sw_other001 = Build 716r171.rhel73-1705051353
node_compute_sw_sharedfile = LFS
node_compute_sw_state = Multi-user, run level 3
node_fileserver_count = 4
node_fileserver_hw_adapter_IB_count = 2
node_fileserver_hw_adapter_IB_data_rate = InfiniBand 4X EDR
node_fileserver_hw_adapter_IB_driver = OFED-3.3-1.0.0.0
node_fileserver_hw_adapter_IB_firmware = 12.14.2036
node_fileserver_hw_adapter_IB_interconnect = InfiniBand
node_fileserver_hw_adapter_IB_model = Mellanox MT27700 with ConnectX-4 ASIC
node_fileserver_hw_adapter_IB_ports_used = 1
node_fileserver_hw_adapter_IB_slot_type = PCIe x16 Gen3
node_fileserver_hw_cpu_char000 = Intel Turbo Boost Technology up to 3.50 GHz
node_fileserver_hw_cpu_char001 = Hyper-Threading Technology disabled
node_fileserver_hw_cpu_mhz = 2600
node_fileserver_hw_cpu_name = Intel Xeon E5-2690 v3
node_fileserver_hw_disk000 = 684 TB RAID 6
node_fileserver_hw_disk001 = 48 x 8+2 2TB 7200 RPM
node_fileserver_hw_memory = 128 GB (8 x 16 GB 2Rx4 PC4-2133P-R)
node_fileserver_hw_model000 = Rackable C1104-GP2 (Intel Xeon E5-2690 v3, 2.60
node_fileserver_hw_model001 = GHz)
node_fileserver_hw_nchips = 2
node_fileserver_hw_ncores = 24
node_fileserver_hw_ncoresperchip = 12
node_fileserver_hw_ncpuorder = 1-2 chips
node_fileserver_hw_nthreadspercore = 1
node_fileserver_hw_ocache = None
node_fileserver_hw_other = None
node_fileserver_hw_pcache = 32 KB I + 32 KB D on chip per core
node_fileserver_hw_scache = 256 KB I+D on chip per core
node_fileserver_hw_tcache = 30 MB I+D on chip per chip
node_fileserver_hw_vendor = Hewlett Packard Enterprise
node_fileserver_label = Lustre FS
node_fileserver_order = 3
node_fileserver_purpose = fileserver
node_fileserver_sw_localfile = ext3
node_fileserver_sw_os000 = Red Hat Enterprise Linux Server 7.3 (Maipo),
node_fileserver_sw_os001 = Kernel 3.10.0-514.2.2.el7.x86_64
node_fileserver_sw_other = None
node_fileserver_sw_state = Multi-user, run level 3
sw_c_compiler000 = Intel C Composer XE for Linux,
sw_c_compiler001 = Version 18.0.0.128 Build 20170811
sw_cxx_compiler000 = Intel C++ Composer XE for Linux,
sw_cxx_compiler001 = Version 18.0.0.128 Build 20170811
sw_f_compiler000 = Intel Fortran Composer XE for Linux,
sw_f_compiler001 = Version 18.0.0.128 Build 20170811
system_name000 = SGI 8600
system_name001 = (Intel Xeon Gold 6148, 2.40 GHz)
notes_010 = Software environment:
notes_015 = export MPI_REQUEST_MAX=65536
notes_020 = export MPI_TYPE_MAX=32768
notes_025 = export MPI_IB_RAILS=2
notes_030 = export MPI_IB_IMM_UPGRADE=false
notes_035 = export MPI_CONNECTIONS_THRESHOLD=0
notes_040 = export MPI_IB_DCIS=2
notes_045 = export MPI_IB_HYPER_LAZY=false
notes_050 = ulimit -s unlimited
notes_055 =
notes_060 = BIOS settings:
notes_065 = AMI BIOS version SAED7177, 07/17/2017
notes_070 =
notes_075 = Job Placement:
notes_080 = Each MPI job was assigned to a topologically compact set
notes_085 = of nodes.
notes_090 =
notes_095 = Additional notes regarding interconnect:
notes_100 = The InfiniBand network consists of two independent planes,
notes_105 = with half the switches in the system allocated to each plane.
notes_110 = I/O traffic is restricted to one plane, while MPI traffic can
notes_115 = use both planes.
# ---- End inclusion of '/gbc-lustre/mackey/specMPI/config/platform.inc.cfg'
####################################################################
#
# defaults
#
####################################################################
flagsurl000=http://www.spec.org/mpi2007/flags/HPE_x86_64_Intel18_flags.xml
action=validate
tune=base
input=ref
teeout=no
env_vars=1
no_input_handler=null
mean_anyway=1
strict_rundir_verify = 1
makeflags=-j 8
%if !defined(%{MPI}) || '%{MPI}' eq 'MPT'
% define mpi_inc_file hpe_mpt.inc
%elif '%{MPI}' eq 'INTEL'
% define mpi_inc_file mpi_intel.inc
%elif '%{MPI}' eq 'OMPI'
% define mpi_inc_file mpi_ompi.inc
%endif
#include: %{mpi_inc_file}
# ----- Begin inclusion of 'hpe_mpt.inc'
############################################################################
%if defined(%{AVX512})
% define ARCHCODE avx512
% define ARCHFLAG -xCORE-AVX512
%else
% define ARCHCODE avx2
% define ARCHFLAG -xCORE-AVX2
%endif
%if defined(%{ZMMHIGH}) && defined(%{AVX512})
ext=hpempi.intel.%{ICCV}.%{ARCHCODE}-zmmhigh.%{SIZE}
%else
ext=hpempi.intel.%{ICCV}.%{ARCHCODE}.%{SIZE}
%endif
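# With the defines recorded in the invocation above (ICCV=18.0.0.128,
# AVX512 set, ZMMHIGH unset, SIZE=medium), the extension resolves to the
# following (illustrative expansion):
#
#   ext=hpempi.intel.18.0.0.128.avx512.medium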
FC = ifort
CC = icc
CXX = icpc

default=default=default=default:
%if defined(%{ZMMHIGH}) && defined(%{AVX512})
FOPTIMIZE = -O3 %{ARCHFLAG} -qopt-zmm-usage=high -no-prec-div -ipo
COPTIMIZE = -O3 %{ARCHFLAG} -qopt-zmm-usage=high -no-prec-div -ipo
CXXOPTIMIZE = -O3 %{ARCHFLAG} -qopt-zmm-usage=high -no-prec-div -ansi-alias -ipo
%else
FOPTIMIZE = -O3 %{ARCHFLAG} -no-prec-div -ipo
COPTIMIZE = -O3 %{ARCHFLAG} -no-prec-div -ipo
CXXOPTIMIZE = -O3 %{ARCHFLAG} -no-prec-div -ansi-alias -ipo
%endif
EXTRA_LIBS = -lmpi
use_submit_for_speed=1
%if defined(%{HOSTLIST}) && defined(%{RPH}) && defined(%{SPILLHOST}) && defined(%{SPILL})
% ifdef %{MPINSIDE}
submit=MPI_DSM_CPULIST=%{CPULIST} mpirun %{HOSTLIST} %{RPH} MPInside $command : %{SPILLHOST} %{SPILL} MPInside $command
% else
submit=MPI_DSM_CPULIST=%{CPULIST} mpirun %{HOSTLIST} %{RPH} $command : %{SPILLHOST} %{SPILL} $command
% endif
%else
% ifdef %{MPINSIDE}
% ifndef %{PPN}
submit=mpiexec_mpt -n $ranks MPInside $command
% else
submit=mpiexec_mpt -ppn %{PPN} -n $ranks MPInside $command
% endif
% else
% ifndef %{PPN}
submit=mpiexec_mpt -n $ranks $command
% else
submit=mpiexec_mpt -ppn %{PPN} -n $ranks $command
% endif
% endif
%endif
#%elif defined(%{HOSTLIST}) && defined(%{RPH}) && %{SPILL} == 0
#% ifdef %{MPINSIDE}
#  submit=MPI_DSM_CPULIST=%{CPULIST} mpirun %{HOSTLIST} %{RPH} MPInside $command
#% else
#  submit=MPI_DSM_CPULIST=%{CPULIST} mpirun %{HOSTLIST} %{RPH} $command
#% endif
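# For the base run recorded above (MPI=MPT, PPN=40, --ranks 160, with
# HOSTLIST and MPINSIDE undefined), the selection logic reduces to the
# last mpiexec_mpt form, which would expand to roughly the following,
# where $command stands for the benchmark binary supplied by the SPEC
# tools (illustrative expansion only):
#
#   mpiexec_mpt -ppn 40 -n 160 $command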
####################################################################
#
# Peak flags
#
####################################################################
# Medium Dataset
####################################################################
%if '%{SIZE}' eq 'medium'
104.milc=peak=default=default:
basepeak=yes

107.leslie3d=peak=default=default:
ranks=960
submit=MPI_DSM_CPULIST=20-34,0-14:allhosts mpirun %{TOTALHOSTLIST} 30 $command

113.GemsFDTD=peak=default=default:
ranks=384
submit=MPI_DSM_CPULIST=20-25,0-5:allhosts mpirun %{TOTALHOSTLIST} 12 $command

115.fds4=peak=default=default:
ranks=704
submit=MPI_DSM_CPULIST=20-30,0-10:allhosts mpirun %{TOTALHOSTLIST} 22 $command

121.pop2=peak=default=default:
ranks=448
submit=MPI_DSM_CPULIST=20-26,0-6:allhosts mpirun %{TOTALHOSTLIST} 14 $command

122.tachyon=peak=default=default:
ranks=1024
submit=MPI_DSM_CPULIST=20-35,0-15:allhosts mpirun %{TOTALHOSTLIST} 32 $command

126.lammps=peak=default=default:
ranks=384
submit=MPI_DSM_CPULIST=20-25,0-5:allhosts mpirun %{TOTALHOSTLIST} 12 $command

127.wrf2=peak=default=default:
ranks=896
submit=MPI_DSM_CPULIST=20-33,0-13:allhosts mpirun %{TOTALHOSTLIST} 28 $command

128.GAPgeofem=peak=default=default:
ranks=1024
submit=MPI_DSM_CPULIST=20-35,0-15:allhosts mpirun %{TOTALHOSTLIST} 32 $command

129.tera_tf=peak=default=default:
ranks=1024
submit=MPI_DSM_CPULIST=20-35,0-15:allhosts mpirun %{TOTALHOSTLIST} 32 $command

130.socorro=peak=default=default:
FC = /sw/sdev/intel/parallel_studio_xe_2017_update4/compilers_and_libraries_2017.4.196/linux/bin/intel64/ifort
CC = /sw/sdev/intel/parallel_studio_xe_2017_update4/compilers_and_libraries_2017.4.196/linux/bin/intel64/icc
CXX = /sw/sdev/intel/parallel_studio_xe_2017_update4/compilers_and_libraries_2017.4.196/linux/bin/intel64/icpc
ENV_LD_LIBRARY_PATH=/sw/sdev/mpt-x86_64/2.17-beta/lib:/sw/sdev/intel/parallel_studio_xe_2017_update4/compilers_and_libraries_2017.4.196/linux/compiler/lib/intel64:/opt/intel/mic/coi/host-linux-release/lib:/opt/intel/mic/myo/lib
ranks=640
submit=MPI_DSM_CPULIST=20-29,0-9:allhosts mpirun %{TOTALHOSTLIST} 20 $command

132.zeusmp2=peak=default=default:
ranks=512
submit=MPI_DSM_CPULIST=20-27,0-7:allhosts mpirun %{TOTALHOSTLIST} 16 $command

137.lu=peak=default=default:
ranks=512
submit=MPI_DSM_CPULIST=20-27,0-7:allhosts mpirun %{TOTALHOSTLIST} 16 $command
%endif
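# The peak submit rules above pin ranks via MPI_DSM_CPULIST, interleaving
# cores of the second socket (20-...) with the first (0-...); the number
# after %{TOTALHOSTLIST} is the per-host rank count given to MPT's mpirun.
# As a worked example for 107.leslie3d: 15 cores per socket give 30 ranks
# per host, so ranks=960 implies a 32-host %{TOTALHOSTLIST} (illustrative
# arithmetic; the actual host list is supplied at run time):
#
#   MPI_DSM_CPULIST=20-34,0-14:allhosts mpirun <host1,...,host32> 30 $command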
####################################################################
# Large Dataset
####################################################################
%if '%{SIZE}' eq 'large'
121.pop2=peak=default=default:
ranks=4096
submit=MPI_DSM_CPULIST=20-35,0-15:allhosts mpirun %{TOTALHOSTLIST} 32 $command

122.tachyon=peak=default=default:
ranks=4096
FC = /sw/sdev/intel/parallel_studio_xe_2017_update4/compilers_and_libraries_2017.4.196/linux/bin/intel64/ifort
CC = /sw/sdev/intel/parallel_studio_xe_2017_update4/compilers_and_libraries_2017.4.196/linux/bin/intel64/icc
CXX = /sw/sdev/intel/parallel_studio_xe_2017_update4/compilers_and_libraries_2017.4.196/linux/bin/intel64/icpc
ENV_LD_LIBRARY_PATH=/sw/sdev/mpt-x86_64/2.17-beta/lib:/sw/sdev/intel/parallel_studio_xe_2017_update4/compilers_and_libraries_2017.4.196/linux/compiler/lib/intel64:/opt/intel/mic/coi/host-linux-release/lib:/opt/intel/mic/myo/lib
submit=MPI_DSM_CPULIST=20-35,0-15:allhosts mpirun %{TOTALHOSTLIST} 32 $command

125.RAxML=peak=default=default:
ranks=5120
FC = /sw/sdev/intel/parallel_studio_xe_2017_update4/compilers_and_libraries_2017.4.196/linux/bin/intel64/ifort
CC = /sw/sdev/intel/parallel_studio_xe_2017_update4/compilers_and_libraries_2017.4.196/linux/bin/intel64/icc
CXX = /sw/sdev/intel/parallel_studio_xe_2017_update4/compilers_and_libraries_2017.4.196/linux/bin/intel64/icpc
ENV_LD_LIBRARY_PATH=/sw/sdev/mpt-x86_64/2.17-beta/lib:/sw/sdev/intel/parallel_studio_xe_2017_update4/compilers_and_libraries_2017.4.196/linux/compiler/lib/intel64:/opt/intel/mic/coi/host-linux-release/lib:/opt/intel/mic/myo/lib
submit=MPI_DSM_CPULIST=20-39,0-19:allhosts mpirun %{TOTALHOSTLIST} 40 $command

126.lammps=peak=default=default:
ranks=5120
submit=MPI_DSM_CPULIST=20-39,0-19:allhosts mpirun %{TOTALHOSTLIST} 40 $command

128.GAPgeofem=peak=default=default:
ranks=4096
submit=MPI_DSM_CPULIST=20-35,0-15:allhosts mpirun %{TOTALHOSTLIST} 32 $command

129.tera_tf=peak=default=default:
ranks=4096
submit=MPI_DSM_CPULIST=20-35,0-15:allhosts mpirun %{TOTALHOSTLIST} 32 $command

132.zeusmp2=peak=default=default:
ranks=2048
submit=MPI_DSM_CPULIST=20-27,0-7:allhosts mpirun %{TOTALHOSTLIST} 16 $command

137.lu=peak=default=default:
ranks=2048
submit=MPI_DSM_CPULIST=20-27,0-7:allhosts mpirun %{TOTALHOSTLIST} 16 $command

142.dmilc=peak=default=default:
ranks=5120
submit=MPI_DSM_CPULIST=20-39,0-19:allhosts mpirun %{TOTALHOSTLIST} 40 $command

143.dleslie=peak=default=default:
ranks=4864
FC = /sw/sdev/intel/parallel_studio_xe_2017_update4/compilers_and_libraries_2017.4.196/linux/bin/intel64/ifort
CC = /sw/sdev/intel/parallel_studio_xe_2017_update4/compilers_and_libraries_2017.4.196/linux/bin/intel64/icc
CXX = /sw/sdev/intel/parallel_studio_xe_2017_update4/compilers_and_libraries_2017.4.196/linux/bin/intel64/icpc
ENV_LD_LIBRARY_PATH=/sw/sdev/mpt-x86_64/2.17-beta/lib:/sw/sdev/intel/parallel_studio_xe_2017_update4/compilers_and_libraries_2017.4.196/linux/compiler/lib/intel64:/opt/intel/mic/coi/host-linux-release/lib:/opt/intel/mic/myo/lib
submit=MPI_DSM_CPULIST=20-38,0-18:allhosts mpirun %{TOTALHOSTLIST} 38 $command

145.lGemsFDTD=peak=default=default:
ranks=2048
submit=MPI_DSM_CPULIST=20-27,0-7:allhosts mpirun %{TOTALHOSTLIST} 16 $command

147.l2wrf2=peak=default=default:
ranks=5120
submit=MPI_DSM_CPULIST=20-39,0-19:allhosts mpirun %{TOTALHOSTLIST} 40 $command
%endif
####################################################################
#
# Portability flags
#
####################################################################
####################################################################
# Large Dataset
####################################################################
%if '%{SIZE}' eq 'large' && defined(%{MPINSIDE})
121.pop2=default=default=default:
ENV_MPINSIDE_OUTPUT_PREFIX=%{MPINSIDE_PREFIX}_121.pop2

122.tachyon=default=default=default:
ENV_MPINSIDE_OUTPUT_PREFIX=%{MPINSIDE_PREFIX}_122.tachyon

125.RAxML=default=default=default:
ENV_MPINSIDE_OUTPUT_PREFIX=%{MPINSIDE_PREFIX}_125.RAxML

126.lammps=default=default=default:
ENV_MPINSIDE_OUTPUT_PREFIX=%{MPINSIDE_PREFIX}_126.lammps

128.GAPgeofem=default=default=default:
ENV_MPINSIDE_OUTPUT_PREFIX=%{MPINSIDE_PREFIX}_128.GAPgeofem

129.tera_tf=default=default=default:
ENV_MPINSIDE_OUTPUT_PREFIX=%{MPINSIDE_PREFIX}_129.tera_tf

132.zeusmp2=default=default=default:
ENV_MPINSIDE_OUTPUT_PREFIX=%{MPINSIDE_PREFIX}_132.zeusmp2

137.lu=default=default=default:
ENV_MPINSIDE_OUTPUT_PREFIX=%{MPINSIDE_PREFIX}_137.lu

142.dmilc=default=default=default:
ENV_MPINSIDE_OUTPUT_PREFIX=%{MPINSIDE_PREFIX}_142.dmilc

143.dleslie=default=default=default:
ENV_MPINSIDE_OUTPUT_PREFIX=%{MPINSIDE_PREFIX}_143.dleslie

145.lGemsFDTD=default=default=default:
ENV_MPINSIDE_OUTPUT_PREFIX=%{MPINSIDE_PREFIX}_145.lGemsFDTD

147.l2wrf2=default=default=default:
ENV_MPINSIDE_OUTPUT_PREFIX=%{MPINSIDE_PREFIX}_147.l2wrf2
%endif
####################################################################
# Medium Dataset
####################################################################
%if '%{SIZE}' eq 'medium' && defined(%{MPINSIDE})
104.milc=default=default=default:
ENV_MPINSIDE_OUTPUT_PREFIX=%{MPINSIDE_PREFIX}_104.milc

107.leslie3d=default=default=default:
ENV_MPINSIDE_OUTPUT_PREFIX=%{MPINSIDE_PREFIX}_107.leslie3d

113.GemsFDTD=default=default=default:
ENV_MPINSIDE_OUTPUT_PREFIX=%{MPINSIDE_PREFIX}_113.GemsFDTD

115.fds4=default=default=default:
ENV_MPINSIDE_OUTPUT_PREFIX=%{MPINSIDE_PREFIX}_115.fds4

121.pop2=default=default=default:
ENV_MPINSIDE_OUTPUT_PREFIX=%{MPINSIDE_PREFIX}_121.pop2

122.tachyon=default=default=default:
ENV_MPINSIDE_OUTPUT_PREFIX=%{MPINSIDE_PREFIX}_122.tachyon

126.lammps=default=default=default:
ENV_MPINSIDE_OUTPUT_PREFIX=%{MPINSIDE_PREFIX}_126.lammps

127.wrf2=default=default=default:
ENV_MPINSIDE_OUTPUT_PREFIX=%{MPINSIDE_PREFIX}_127.wrf2

128.GAPgeofem=default=default=default:
ENV_MPINSIDE_OUTPUT_PREFIX=%{MPINSIDE_PREFIX}_128.GAPgeofem

129.tera_tf=default=default=default:
ENV_MPINSIDE_OUTPUT_PREFIX=%{MPINSIDE_PREFIX}_129.tera_tf

130.socorro=default=default=default:
ENV_MPINSIDE_OUTPUT_PREFIX=%{MPINSIDE_PREFIX}_130.socorro

132.zeusmp2=default=default=default:
ENV_MPINSIDE_OUTPUT_PREFIX=%{MPINSIDE_PREFIX}_132.zeusmp2

137.lu=default=default=default:
ENV_MPINSIDE_OUTPUT_PREFIX=%{MPINSIDE_PREFIX}_137.lu
%endif
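# When MPInside profiling is enabled via --define MPINSIDE, the ENV_ lines
# above set the MPINSIDE_OUTPUT_PREFIX environment variable per benchmark,
# so each benchmark's trace files get a distinct name. With a hypothetical
# --define MPINSIDE_PREFIX=run1, 104.milc would execute with:
#
#   MPINSIDE_OUTPUT_PREFIX=run1_104.milc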
# ---- End inclusion of '/gbc-lustre/mackey/specMPI/config/hpe_mpt.inc.cfg'
####################################################################
#
# Global Portability flags
#
####################################################################
121.pop2=default=default=default:
CPORTABILITY = -DSPEC_MPI_CASE_FLAG

127.wrf2=default=default=default:
CPORTABILITY = -DSPEC_MPI_CASE_FLAG -DSPEC_MPI_LINUX

130.socorro=default=default=default:
notes_base_005=src.alt used: 130.socorro->nullify_ptrs
srcalt=nullify_ptrs
FPORTABILITY=-assume nostd_intent_in

129.tera_tf=default=default=default:
%if '%{SIZE}' eq 'medium'
srcalt=add_rank_support
notes_base_000=src.alt used: 129.tera_tf->add_rank_support
%endif

143.dleslie=default=default=default:
srcalt=integer_overflow
notes_base_143=src.alt used: 143.dleslie->integer_overflow

# The following section was added automatically, and contains settings that
# did not appear in the original configuration file, but were added to the
# raw file after the run.
default:
notes_000 =
notes_005 =
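# Note on the srcalt selections above: a src.alt is a SPEC-approved source
# patch shipped with the benchmark tree; setting, e.g., srcalt=nullify_ptrs
# for 130.socorro directs runspec to apply that patch at build time, and the
# matching notes_base_* line records its use in the published report.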