# Invocation command line:
# /work/bw579735/SPEC/mpi/bin/runspec --config=claix_intelmpi --reportable --iterations=3 --ranks 384 --flagsurl=/work/bw579735/SPEC/mpi_new/result/claix-mpi.xml -I medium
# output_root was not used for this run
############################################################################
#
#####################################################################
#
# Config file to run SPEC MPI2007 with Intel Software Toolchain
# (Intel Compiler 17.0.2 and Intel MPI 2017.1.132)
#
#####################################################################

ext = rwth
env_vars = 1
basepeak = 1
makeflags = -j 8
output_format = all

FC = mpiifort
CC = mpiicc
CXX = mpiicpc

ENV_MPIR_CVAR_COLL_ALIAS_CHECK=0
ENV_I_MPI_COMPATIBILITY=3

#####################################################################
# Portability flags
#####################################################################

121.pop2=default=default=default:
CPORTABILITY = -DSPEC_MPI_CASE_FLAG

126.lammps=default=default=default:
CXXPORTABILITY = -DMPICH_IGNORE_CXX_SEEK

127.wrf2=default=default=default:
CPORTABILITY = -DSPEC_MPI_CASE_FLAG -DSPEC_MPI_LINUX

130.socorro=default=default=default:
srcalt=nullify_ptrs
FPORTABILITY = -assume nostd_intent_in

#################################################################
# Optimization flags
#################################################################

default=default=default=default:
OPTIMIZE = -O3 -xSSE4.2 -no-prec-div
#OPTIMIZE = -O3 -xCORE-AVX2 -no-prec-div
submit = \$MPIEXEC \$FLAGS_MPI_BATCH -genv I_MPI_COMPATIBILITY=3 $command

#################################################################
# Notes
#################################################################

company_name = RWTH University Aachen
test_sponsor = RWTH University Aachen
license_num = 055A
tester = Bo Wang
hw_avail = Oct-2016
sw_avail = Oct-2016
prepared_by = Bo Wang
system_vendor = NEC
sw_parallel_other = None
node_compute_sw_other = None
system_name000 = NEC HPC1812Rg-2 (Intel Xeon E5-2650 v4, 2.20 GHz,
system_name001 = DDR4-2400 MHz, SMT ON, Turbo ON)
#
# Computation node info
#
node_compute_label = NEC HPC
node_compute_order = 1
node_compute_count = 16
node_compute_purpose = compute
node_compute_hw_vendor = Intel
node_compute_hw_model = NEC HPC 1812Rg
node_compute_hw_cpu_name = Intel Xeon E5-2650 v4
node_compute_hw_ncpuorder = 1-2 chips
node_compute_hw_nchips = 2
node_compute_hw_ncores = 24
node_compute_hw_ncoresperchip = 12
node_compute_hw_nthreadspercore = 2
node_compute_hw_cpu_char000 = 12 core, 2.2 GHz, 9.6 GT/s QPI
node_compute_hw_cpu_char001 = Intel Turbo Boost Technology up to 2.9 GHz
node_compute_hw_cpu_char002 = Hyper-Threading Technology enabled
node_compute_hw_cpu_mhz = 2200
node_compute_hw_pcache = 32 KB I + 32 KB D on chip per core
node_compute_hw_scache = 256 KB I+D on chip per core
node_compute_hw_tcache000 = 30 MB I+D on chip per chip
node_compute_hw_tcache001 = shared / 12 cores
node_compute_hw_ocache = None
node_compute_hw_memory = 128 GB (8 x 16 GB 2Rx8 PC4-2400T-R)
node_compute_hw_disk = SATA, Samsung SM863, 120GB, SSD
node_compute_hw_other = None
node_compute_hw_adapter_opa_model000 = Intel Omni-Path Host Fabric Interface Adapter 100
node_compute_hw_adapter_opa_model001 = Series 1 Port PCIe x8
node_compute_hw_adapter_opa_count = 1
node_compute_hw_adapter_opa_slot_type = PCI-E x8
node_compute_hw_adapter_opa_data_rate = 58Gb/s
node_compute_hw_adapter_opa_ports_used = 1
node_compute_hw_adapter_opa_interconnect = Omni-Path
node_compute_hw_adapter_opa_driver = Intel Omni-Path Host Fabric Interface
node_compute_hw_adapter_opa_firmware = 2.33.5100
node_compute_sw_os000 = CentOS Linux release 7.3.1611 (Core)
node_compute_sw_os001 = Kernel 3.10.0-514.26.1.el7.x86_64
node_compute_sw_localfile = Linux/xfs
node_compute_sw_sharedfile = NFSv3
node_compute_sw_state = Multi-User, run level 3
#
# Fileserver node info
#
node_fileserver_label = NFSv3
node_fileserver_order = 2
node_fileserver_count = 1
node_fileserver_purpose = fileserver
node_fileserver_hw_vendor = NETAPP
node_fileserver_hw_model = FAS6240
node_fileserver_hw_cpu_name = Intel Xeon CPU X5670
node_fileserver_hw_ncpuorder = 1-2 chips
node_fileserver_hw_nchips = 2
node_fileserver_hw_ncores = 12
node_fileserver_hw_ncoresperchip = 6
node_fileserver_hw_nthreadspercore = 2
node_fileserver_hw_cpu_char = None
node_fileserver_hw_cpu_mhz = 2930
node_fileserver_hw_pcache = 32 KB I + 32 KB D on chip per core
node_fileserver_hw_scache = 256 KB I+D on chip per core
node_fileserver_hw_tcache = 12 MB I+D on chip per chip
node_fileserver_hw_ocache = None
node_fileserver_hw_memory = 96 GB
node_fileserver_hw_disk = 216 disks, 2 TB/disk, 432TB total
node_fileserver_hw_other = None
node_fileserver_hw_adapter_fs_model000 = 10 Gigabit Ethernet Controller
node_fileserver_hw_adapter_fs_model001 = IX1-SFP+
node_fileserver_hw_adapter_fs_count = 2
node_fileserver_hw_adapter_fs_slot_type = PCI-Express x8
node_fileserver_hw_adapter_fs_data_rate = 10Gbps Ethernet
node_fileserver_hw_adapter_fs_ports_used = 2
node_fileserver_hw_adapter_fs_interconnect = Ethernet
node_fileserver_hw_adapter_fs_driver = N/A
node_fileserver_hw_adapter_fs_firmware = 1.8-0
node_fileserver_sw_os = NetApp Release 8.2.3P2 7-Mode
node_fileserver_sw_localfile = None
node_fileserver_sw_sharedfile = NFSv3
node_fileserver_sw_state = Multi-User, run level 3
node_fileserver_sw_other = None
#
# Omni-Path interconnect
#
interconnect_opa_label = Omni-Path Architecture(MPI)
interconnect_opa_order = 1
interconnect_opa_purpose = MPI traffic
interconnect_opa_hw_vendor = Intel
interconnect_opa_hw_model = Intel Omni-Path 100 Series
interconnect_opa_hw_switch_100_model = Intel Omni-Path 100 Series
interconnect_opa_hw_switch_100_count = 25
interconnect_opa_hw_switch_100_ports = 48
interconnect_opa_hw_topo = 2:1 Blocking Fat tree
interconnect_opa_hw_switch_100_data_rate = 100Gbps
interconnect_opa_hw_switch_100_firmware = 10.3.0.0.81
#
# Cluster file system interconnect
#
interconnect_fs_label = Gigabit Ethernet(I/O)
interconnect_fs_order = 2
interconnect_fs_purpose = Cluster File System
interconnect_fs_hw_vendor = Cisco
interconnect_fs_hw_model = Ethernet 40 Gbps
interconnect_fs_hw_switch_fs_model = Cisco Nexus5020, N5K-C5020P-BF
interconnect_fs_hw_switch_fs_count = 1
interconnect_fs_hw_switch_fs_ports = 96
interconnect_fs_hw_topo = Star
interconnect_fs_hw_switch_fs_data_rate = 40Gbps
interconnect_fs_hw_switch_fs_firmware = 5.2(1)N1(9a)
#
# Hardware
#
system_class = Homogeneous
max_ranks = 144
max_peak_ranks = 144
#
# Software
#
sw_c_compiler000 = Intel C++ Composer XE 2017 for Linux, Version
sw_c_compiler001 = 17.0.2.174
sw_cxx_compiler000 = Intel C++ Composer XE 2017 for Linux, Version
sw_cxx_compiler001 = 17.0.2.174
sw_f_compiler000 = Intel Fortran Composer XE 2017 for Linux, Version
sw_f_compiler001 = 17.0.2.174
sw_base_ptrsize = 64-bit
sw_peak_ptrsize = 64-bit
sw_mpi_library000 = Intel MPI Library 2017 for Linux,
sw_mpi_library001 = Version 2017.1.132
sw_mpi_other = None
sw_preprocessors = No
sw_other = None
#
# General notes
#

# The following section was added automatically, and contains settings that
# did not appear in the original configuration file, but were added to the
# raw file after the run.
default:
flagsurl000 = http://www.spec.org/mpi2007/flags/RWTH-Aachen-CLAIX-MPI-2017-SEP.xml
notes_000 =130.socorro (base): "nullify_ptrs" src.alt was used.
notes_005 =